VirtualBox

source: vbox/trunk/src/VBox/Devices/PC/DevDMA.cpp@ 10719

Last change on this file since 10719 was 8155, checked in by vboxsync, 17 years ago

The Big Sun Rebranding Header Change

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 26.4 KB
Line 
1/* $Id: DevDMA.cpp 8155 2008-04-18 15:16:47Z vboxsync $ */
2/** @file
3 * DMA Controller Device.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 * --------------------------------------------------------------------
21 *
22 * This code is based on:
23 *
24 * QEMU DMA emulation
25 *
26 * Copyright (c) 2003 Vassili Karpov (malc)
27 *
28 * Permission is hereby granted, free of charge, to any person obtaining a copy
29 * of this software and associated documentation files (the "Software"), to deal
30 * in the Software without restriction, including without limitation the rights
31 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
32 * copies of the Software, and to permit persons to whom the Software is
33 * furnished to do so, subject to the following conditions:
34 *
35 * The above copyright notice and this permission notice shall be included in
36 * all copies or substantial portions of the Software.
37 *
38 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
39 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
40 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
41 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
42 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
43 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
44 * THE SOFTWARE.
45 */
46
47#ifdef VBOX
48
49/*******************************************************************************
50* Header Files *
51*******************************************************************************/
52#include <VBox/pdmdev.h>
53#include <VBox/err.h>
54
55#define LOG_GROUP LOG_GROUP_DEFAULT ///@todo LOG_GROUP_DEV_DMA
56#include <VBox/log.h>
57#include <iprt/assert.h>
58#include <iprt/uuid.h>
59#include <iprt/string.h>
60
61#include <stdio.h>
62#include <stdlib.h>
63
64#include "Builtins.h"
65#include "../vl_vbox.h"
66typedef PFNDMATRANSFERHANDLER DMA_transfer_handler;
67
68#else /* !VBOX */
69#include "vl.h"
70#endif
71
72/* #define DEBUG_DMA */
73
74#ifndef VBOX
75#ifndef __WIN32__
76#define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__)
77#ifdef DEBUG_DMA
78#define lwarn(...) fprintf (stderr, "dma: " __VA_ARGS__)
79#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
80#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
81#else
82#define lwarn(...)
83#define linfo(...)
84#define ldebug(...)
85#endif
86#else
87#define dolog()
88#define lwarn()
89#define linfo()
90#define ldebug()
91#endif
92#else /* VBOX */
93
94#ifdef LOG_ENABLED
95#endif
96# ifdef LOG_ENABLED
97# define DEBUG_DMA
98 static void DMA_DPRINTF (const char *fmt, ...)
99 {
100 if (LogIsEnabled ()) {
101 va_list args;
102 va_start (args, fmt);
103 RTLogLogger (NULL, NULL, "dma: %N", fmt, &args); /* %N - nested va_list * type formatting call. */
104 va_end (args);
105 }
106 }
107# else
108 DECLINLINE(void) DMA_DPRINTF(const char *pszFmt, ...) {}
109# endif
110
111#define dolog DMA_DPRINTF
112#define lwarn DMA_DPRINTF
113#define linfo DMA_DPRINTF
114#define ldebug DMA_DPRINTF
115
116#endif /* VBOX */
117
118#define LENOFA(a) ((int) (sizeof(a)/sizeof(a[0])))
119
/** State of a single DMA channel. */
struct dma_regs {
    unsigned int now[2];  /**< Current address [ADDR] and current count [COUNT]. */
    uint16_t base[2];     /**< Programmed base address [ADDR] and base count [COUNT]. */
    uint8_t mode;         /**< Mode register (transfer type, direction, auto-init bits). */
    uint8_t page;         /**< Page register: address bits 16..23. */
    uint8_t pageh;        /**< High page register: address bits 24..30 (masked with 0x7f when used). */
    uint8_t dack;         /**< DACK state; only touched by save/load in this file. */
    uint8_t eop;          /**< EOP state; only touched by save/load in this file. */
    DMA_transfer_handler transfer_handler; /**< Device callback that performs the actual data transfer. */
    void *opaque;         /**< User argument passed back to transfer_handler. */
};
131
132#define ADDR 0
133#define COUNT 1
134
/** State of one 8237A DMA controller (four channels). */
struct dma_cont {
    uint8_t status;      /**< Status register: TC flags in bits 0..3, DREQ flags in bits 4..7. */
    uint8_t command;     /**< Command register. */
    uint8_t mask;        /**< Per-channel mask bits (1 = masked). */
    uint8_t flip_flop;   /**< Byte-pointer flip-flop used for 16-bit register access. */
    unsigned int dshift; /**< Port address shift: 0 for the 8-bit controller, 1 for the 16-bit one. */
    struct dma_regs regs[4]; /**< Per-channel state. */
};
143
/** Instance data of the DMA device: two 8237A controllers. */
typedef struct {
    PPDMDEVINS pDevIns;  /**< Owning PDM device instance. */
    PCPDMDMACHLP pHlp;   /**< PDM DMA controller helpers, set when registering with PDM. */
    struct dma_cont dma_controllers[2]; /**< [0] = controller for channels 0-3, [1] = channels 4-7. */
} DMAState;
149
/* 8237A command register bits.  write_cont rejects any non-zero command
   that sets one of the CMD_NOT_SUPPORTED bits; only CMD_BLOCK_CONTROLLER
   (bit 2) is accepted. */
enum {
    CMD_MEMORY_TO_MEMORY = 0x01,
    CMD_FIXED_ADDRESS = 0x02,
    CMD_BLOCK_CONTROLLER = 0x04,
    CMD_COMPRESSED_TIME = 0x08,
    CMD_CYCLIC_PRIORITY = 0x10,
    CMD_EXTENDED_WRITE = 0x20,
    CMD_LOW_DREQ = 0x40,
    CMD_LOW_DACK = 0x80,
    CMD_NOT_SUPPORTED = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
    | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
    | CMD_LOW_DREQ | CMD_LOW_DACK

};
164
165static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};
166
167static void write_page (void *opaque, uint32_t nport, uint32_t data)
168{
169 struct dma_cont *d = (struct dma_cont*)opaque;
170 int ichan;
171
172 ichan = channels[nport & 7];
173 if (-1 == ichan) {
174 dolog ("invalid channel %#x %#x\n", nport, data);
175 return;
176 }
177 d->regs[ichan].page = data;
178}
179
180static void write_pageh (void *opaque, uint32_t nport, uint32_t data)
181{
182 struct dma_cont *d = (struct dma_cont*)opaque;
183 int ichan;
184
185 ichan = channels[nport & 7];
186 if (-1 == ichan) {
187 dolog ("invalid channel %#x %#x\n", nport, data);
188 return;
189 }
190 d->regs[ichan].pageh = data;
191}
192
193static uint32_t read_page (void *opaque, uint32_t nport)
194{
195 struct dma_cont *d = (struct dma_cont*)opaque;
196 int ichan;
197
198 ichan = channels[nport & 7];
199 if (-1 == ichan) {
200 dolog ("invalid channel read %#x\n", nport);
201 return 0;
202 }
203 return d->regs[ichan].page;
204}
205
206static uint32_t read_pageh (void *opaque, uint32_t nport)
207{
208 struct dma_cont *d = (struct dma_cont*)opaque;
209 int ichan;
210
211 ichan = channels[nport & 7];
212 if (-1 == ichan) {
213 dolog ("invalid channel read %#x\n", nport);
214 return 0;
215 }
216 return d->regs[ichan].pageh;
217}
218
219static inline void init_chan (struct dma_cont *d, int ichan)
220{
221 struct dma_regs *r;
222
223 r = d->regs + ichan;
224 r->now[ADDR] = r->base[ADDR] << d->dshift;
225 r->now[COUNT] = 0;
226}
227
228static inline int getff (struct dma_cont *d)
229{
230 int ff;
231
232 ff = d->flip_flop;
233 d->flip_flop = !ff;
234 return ff;
235}
236
/**
 * Read one byte of a channel's current address (even port) or remaining
 * count (odd port) register; the flip-flop selects the low/high byte.
 */
static uint32_t read_chan (void *opaque, uint32_t nport)
{
    struct dma_cont *d = (struct dma_cont*)opaque;
    int ichan, nreg, iport, ff, val, dir;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f; /* register index within this controller */
    ichan = iport >> 1;                  /* two ports per channel */
    nreg = iport & 1;                    /* 0 = address, 1 = count */
    r = d->regs + ichan;

    /* Mode bit 5 selects address-decrement transfers. */
    dir = ((r->mode >> 5) & 1) ? -1 : 1;
    ff = getff (d);
    if (nreg)
        val = (r->base[COUNT] << d->dshift) - r->now[COUNT];
    else
        val = r->now[ADDR] + r->now[COUNT] * dir;

    ldebug ("read_chan %#x -> %d\n", iport, val);
    /* ff selects the byte (ff << 3 = 0 or 8); dshift additionally scales
       the value back down for the 16-bit controller. */
    return (val >> (d->dshift + (ff << 3))) & 0xff;
}
258
259static void write_chan (void *opaque, uint32_t nport, uint32_t data)
260{
261 struct dma_cont *d = (struct dma_cont*)opaque;
262 int iport, ichan, nreg;
263 struct dma_regs *r;
264
265 iport = (nport >> d->dshift) & 0x0f;
266 ichan = iport >> 1;
267 nreg = iport & 1;
268 r = d->regs + ichan;
269 if (getff (d)) {
270 r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00);
271 init_chan (d, ichan);
272 } else {
273 r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff);
274 }
275}
276
/**
 * Write a controller-level register (command, request, mask, mode,
 * flip-flop clear, master reset, mask clear/write-all).
 */
static void write_cont (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = (struct dma_cont*)opaque;
    int iport, ichan = 0;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08: /* command */
        /* Only the all-zero command or controller-disable is accepted. */
        if ((data != 0) && (data & CMD_NOT_SUPPORTED)) {
            dolog ("command %#x not supported\n", data);
            return;
        }
        d->command = data;
        break;

    case 0x09: /* request register: software DREQ set/clear */
        ichan = data & 3;
        if (data & 4) {
            d->status |= 1 << (ichan + 4);
        }
        else {
            d->status &= ~(1 << (ichan + 4));
        }
        /* Writing the request register also clears the channel's TC bit. */
        d->status &= ~(1 << ichan);
        break;

    case 0x0a: /* single mask */
        if (data & 4)
            d->mask |= 1 << (data & 3);
        else
            d->mask &= ~(1 << (data & 3));
        break;

    case 0x0b: /* mode */
        {
            ichan = data & 3;
#ifdef DEBUG_DMA
            {
                int op, ai, dir, opmode;
                op = (data >> 2) & 3;
                ai = (data >> 4) & 1;
                dir = (data >> 5) & 1;
                opmode = (data >> 6) & 3;

                linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
                       ichan, op, ai, dir, opmode);
            }
#endif
            d->regs[ichan].mode = data;
            break;
        }

    case 0x0c: /* clear flip flop */
        d->flip_flop = 0;
        break;

    case 0x0d: /* reset */
        d->flip_flop = 0;
        d->mask = ~0;        /* reset masks all channels */
        d->status = 0;
        d->command = 0;
        break;

    case 0x0e: /* clear mask for all channels */
        d->mask = 0;
        break;

    case 0x0f: /* write mask for all channels */
        d->mask = data;
        break;

    default:
        dolog ("unknown iport %#x\n", iport);
        break;
    }

#ifdef DEBUG_DMA
    if (0xc != iport) {
        linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n",
               nport, ichan, data);
    }
#endif
}
360
361static uint32_t read_cont (void *opaque, uint32_t nport)
362{
363 struct dma_cont *d = (struct dma_cont*)opaque;
364 int iport, val;
365
366 iport = (nport >> d->dshift) & 0x0f;
367 switch (iport) {
368 case 0x08: /* status */
369 val = d->status;
370 d->status &= 0xf0;
371 break;
372 case 0x0f: /* mask */
373 val = d->mask;
374 break;
375 default:
376 val = 0;
377 break;
378 }
379
380 ldebug ("read_cont: nport %#06x, iport %#04x val %#x\n", nport, iport, val);
381 return val;
382}
383
384static uint8_t DMA_get_channel_mode (DMAState *s, int nchan)
385{
386 return s->dma_controllers[nchan > 3].regs[nchan & 3].mode;
387}
388
389static void DMA_hold_DREQ (DMAState *s, int nchan)
390{
391 int ncont, ichan;
392
393 ncont = nchan > 3;
394 ichan = nchan & 3;
395 linfo ("held cont=%d chan=%d\n", ncont, ichan);
396 s->dma_controllers[ncont].status |= 1 << (ichan + 4);
397}
398
399static void DMA_release_DREQ (DMAState *s, int nchan)
400{
401 int ncont, ichan;
402
403 ncont = nchan > 3;
404 ichan = nchan & 3;
405 linfo ("released cont=%d chan=%d\n", ncont, ichan);
406 s->dma_controllers[ncont].status &= ~(1 << (ichan + 4));
407}
408
409static void channel_run (DMAState *s, int ncont, int ichan)
410{
411 int n;
412 struct dma_regs *r = &s->dma_controllers[ncont].regs[ichan];
413#ifdef DEBUG_DMA
414 int dir, opmode;
415
416 dir = (r->mode >> 5) & 1;
417 opmode = (r->mode >> 6) & 3;
418
419 if (dir) {
420 dolog ("DMA in address decrement mode\n");
421 }
422 if (opmode != 1) {
423 dolog ("DMA not in single mode select %#x\n", opmode);
424 }
425#endif
426
427 r = s->dma_controllers[ncont].regs + ichan;
428 n = r->transfer_handler (s->pDevIns, r->opaque, ichan + (ncont << 2),
429 r->now[COUNT], (r->base[COUNT] + 1) << ncont);
430 r->now[COUNT] = n;
431 ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
432}
433
434static void DMA_run (DMAState *s)
435{
436 struct dma_cont *d;
437 int icont, ichan;
438
439 d = s->dma_controllers;
440
441 for (icont = 0; icont < 2; icont++, d++) {
442 for (ichan = 0; ichan < 4; ichan++) {
443 int mask;
444
445 mask = 1 << ichan;
446
447 if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4))))
448 channel_run (s, icont, ichan);
449 }
450 }
451}
452
453static void DMA_register_channel (DMAState *s, unsigned nchan,
454 DMA_transfer_handler transfer_handler,
455 void *opaque)
456{
457 struct dma_regs *r;
458 int ichan, ncont;
459 LogFlow (("DMA_register_channel: s=%p nchan=%d transfer_handler=%p opaque=%p\n",
460 s, nchan, transfer_handler, opaque));
461
462 ncont = nchan > 3;
463 ichan = nchan & 3;
464
465 r = s->dma_controllers[ncont].regs + ichan;
466 r->transfer_handler = transfer_handler;
467 r->opaque = opaque;
468}
469
/**
 * Read @a len bytes of guest memory for channel @a nchan into @a buf,
 * starting at transfer position @a pos.  The guest address combines the
 * high page (7 bits), the page register and the 16-bit current address.
 *
 * @returns len.
 */
static uint32_t DMA_read_memory (DMAState *s,
                                 unsigned nchan,
                                 void *buf,
                                 uint32_t pos,
                                 uint32_t len)
{
    struct dma_regs *r = &s->dma_controllers[nchan > 3].regs[nchan & 3];
    uint32_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        /* Address-decrement mode: read the block below the address. */
        unsigned i;
        uint8_t *p = (uint8_t*)buf;

#ifdef VBOX
        PDMDevHlpPhysRead (s->pDevIns, addr - pos - len, buf, len);
#else
        cpu_physical_memory_read (addr - pos - len, buf, len);
#endif
        /* What about 16bit transfers? */
        /* NOTE(review): this loop mirrors the upper half onto the lower
           half (no swap back), so it is NOT a full in-place reversal; the
           corresponding loop in DMA_write_memory also uses a different
           bound (len instead of len >> 1).  Inherited from the original
           QEMU code -- confirm intent before changing. */
        for (i = 0; i < len >> 1; i++) {
            uint8_t b = p[len - i - 1];
            p[i] = b;
        }
    }
    else
#ifdef VBOX
        PDMDevHlpPhysRead (s->pDevIns, addr + pos, buf, len);
#else
        cpu_physical_memory_read (addr + pos, buf, len);
#endif
    return len;
}
502
/**
 * Write @a len bytes from @a buf to guest memory for channel @a nchan,
 * starting at transfer position @a pos.
 *
 * @returns len.
 */
static uint32_t DMA_write_memory (DMAState *s,
                                  unsigned nchan,
                                  const void *buf,
                                  uint32_t pos,
                                  uint32_t len)
{
    struct dma_regs *r = &s->dma_controllers[nchan > 3].regs[nchan & 3];
    uint32_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        /* Address-decrement mode. */
        unsigned i;
        /* NOTE(review): casts away const and mutates the caller's buffer
           AFTER the physical write has already completed. */
        uint8_t *p = (uint8_t *) buf;

#ifdef VBOX
        PDMDevHlpPhysWrite (s->pDevIns, addr - pos - len, buf, len);
#else
        cpu_physical_memory_write (addr - pos - len, buf, len);
#endif
        /* What about 16bit transfers? */
        /* NOTE(review): loop bound is len here but len >> 1 in
           DMA_read_memory -- inconsistent; inherited from the original
           QEMU code.  Confirm intent before changing either side. */
        for (i = 0; i < len; i++) {
            uint8_t b = p[len - i - 1];
            p[i] = b;
        }
    }
    else
#ifdef VBOX
        PDMDevHlpPhysWrite (s->pDevIns, addr + pos, buf, len);
#else
        cpu_physical_memory_write (addr + pos, buf, len);
#endif

    return len;
}
536
537
#ifndef VBOX
/* request the emulator to transfer a new DMA memory block ASAP */
void DMA_schedule(int nchan)
{
    /* Kick the CPU out of its execution loop so DMA gets serviced. */
    cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
}
#endif
545
/** Reset one controller by emulating a write to its master-reset port (0x0d). */
static void dma_reset(void *opaque)
{
    struct dma_cont *d = (struct dma_cont*)opaque;
    write_cont (d, (0x0d << d->dshift), 0);
}
551
#ifdef VBOX
/** Expands to the signature of a PDM I/O-port read handler named io_read_<n>. */
#define IO_READ_PROTO(n) \
static DECLCALLBACK(int) io_read_##n (PPDMDEVINS pDevIns, \
                                      void *pvUser, \
                                      RTIOPORT Port, \
                                      uint32_t *pu32, \
                                      unsigned cb)


/** Expands to the signature of a PDM I/O-port write handler named io_write_<n>. */
#define IO_WRITE_PROTO(n) \
static DECLCALLBACK(int) io_write_##n (PPDMDEVINS pDevIns, \
                                       void *pvUser, \
                                       RTIOPORT Port, \
                                       uint32_t u32, \
                                       unsigned cb)
568IO_WRITE_PROTO (chan)
569{
570 if (cb == 1) {
571 write_chan (pvUser, Port, u32);
572 }
573#ifdef PARANOID
574 else {
575 Log (("Unknown write to %#x of size %d, value %#x\n",
576 Port, cb, u32));
577 }
578#endif
579 return VINF_SUCCESS;
580}
581
582IO_WRITE_PROTO (page)
583{
584 if (cb == 1) {
585 write_page (pvUser, Port, u32);
586 }
587#ifdef PARANOID
588 else {
589 Log (("Unknown write to %#x of size %d, value %#x\n",
590 Port, cb, u32));
591 }
592#endif
593 return VINF_SUCCESS;
594}
595
596IO_WRITE_PROTO (pageh)
597{
598 if (cb == 1) {
599 write_pageh (pvUser, Port, u32);
600 }
601#ifdef PARANOID
602 else {
603 Log (("Unknown write to %#x of size %d, value %#x\n",
604 Port, cb, u32));
605 }
606#endif
607 return VINF_SUCCESS;
608}
609
610IO_WRITE_PROTO (cont)
611{
612 if (cb == 1) {
613 write_cont (pvUser, Port, u32);
614 }
615#ifdef PARANOID
616 else {
617 Log (("Unknown write to %#x of size %d, value %#x\n",
618 Port, cb, u32));
619 }
620#endif
621 return VINF_SUCCESS;
622}
623
624IO_READ_PROTO (chan)
625{
626 if (cb == 1) {
627 *pu32 = read_chan (pvUser, Port);
628 return VINF_SUCCESS;
629 }
630 else {
631 return VERR_IOM_IOPORT_UNUSED;
632 }
633}
634
635IO_READ_PROTO (page)
636{
637 if (cb == 1) {
638 *pu32 = read_page (pvUser, Port);
639 return VINF_SUCCESS;
640 }
641 else {
642 return VERR_IOM_IOPORT_UNUSED;
643 }
644}
645
646IO_READ_PROTO (pageh)
647{
648 if (cb == 1) {
649 *pu32 = read_pageh (pvUser, Port);
650 return VINF_SUCCESS;
651 }
652 else {
653 return VERR_IOM_IOPORT_UNUSED;
654 }
655}
656
657IO_READ_PROTO (cont)
658{
659 if (cb == 1) {
660 *pu32 = read_cont (pvUser, Port);
661 return VINF_SUCCESS;
662 }
663 else {
664 return VERR_IOM_IOPORT_UNUSED;
665 }
666}
667#endif
668
669/* dshift = 0: 8 bit DMA, 1 = 16 bit DMA */
670static void dma_init2(DMAState *s, struct dma_cont *d, int base, int dshift,
671 int page_base, int pageh_base)
672{
673 const static int page_port_list[] = { 0x1, 0x2, 0x3, 0x7 };
674 int i;
675
676 d->dshift = dshift;
677 for (i = 0; i < 8; i++) {
678#ifdef VBOX
679 PDMDevHlpIOPortRegister (s->pDevIns, base + (i << dshift), 1, d,
680 io_write_chan, io_read_chan, NULL, NULL, "DMA");
681#else
682 register_ioport_write (base + (i << dshift), 1, 1, write_chan, d);
683 register_ioport_read (base + (i << dshift), 1, 1, read_chan, d);
684#endif
685 }
686 for (i = 0; i < LENOFA (page_port_list); i++) {
687#ifdef VBOX
688 PDMDevHlpIOPortRegister (s->pDevIns, page_base + page_port_list[i], 1, d,
689 io_write_page, io_read_page, NULL, NULL, "DMA Page");
690#else
691 register_ioport_write (page_base + page_port_list[i], 1, 1,
692 write_page, d);
693 register_ioport_read (page_base + page_port_list[i], 1, 1,
694 read_page, d);
695#endif
696 if (pageh_base >= 0) {
697#ifdef VBOX
698 PDMDevHlpIOPortRegister (s->pDevIns, pageh_base + page_port_list[i], 1, d,
699 io_write_pageh, io_read_pageh, NULL, NULL, "DMA Page High");
700#else
701 register_ioport_write (pageh_base + page_port_list[i], 1, 1,
702 write_pageh, d);
703 register_ioport_read (pageh_base + page_port_list[i], 1, 1,
704 read_pageh, d);
705#endif
706 }
707 }
708 for (i = 0; i < 8; i++) {
709#ifdef VBOX
710 PDMDevHlpIOPortRegister (s->pDevIns, base + ((i + 8) << dshift), 1, d,
711 io_write_cont, io_read_cont, NULL, NULL, "DMA cont");
712#else
713 register_ioport_write (base + ((i + 8) << dshift), 1, 1,
714 write_cont, d);
715 register_ioport_read (base + ((i + 8) << dshift), 1, 1,
716 read_cont, d);
717#endif
718 }
719#ifndef VBOX
720 qemu_register_reset(dma_reset, d);
721#endif
722 dma_reset(d);
723}
724
/** Save one controller's state to the snapshot stream.
 *  Field order must be kept in sync with dma_load. */
static void dma_save (QEMUFile *f, void *opaque)
{
    struct dma_cont *d = (struct dma_cont*)opaque;
    int i;

    /* status is intentionally not saved (kept commented out, as in QEMU) */
    /* qemu_put_8s (f, &d->status); */
    qemu_put_8s (f, &d->command);
    qemu_put_8s (f, &d->mask);
    qemu_put_8s (f, &d->flip_flop);
    qemu_put_be32s (f, &d->dshift);

    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];
        qemu_put_be32s (f, &r->now[0]);
        qemu_put_be32s (f, &r->now[1]);
        qemu_put_be16s (f, &r->base[0]);
        qemu_put_be16s (f, &r->base[1]);
        qemu_put_8s (f, &r->mode);
        qemu_put_8s (f, &r->page);
        qemu_put_8s (f, &r->pageh);
        qemu_put_8s (f, &r->dack);
        qemu_put_8s (f, &r->eop);
    }
}
749
/** Load one controller's state from the snapshot stream (must mirror
 *  dma_save's field order).
 *  @returns 0 on success, version error otherwise. */
static int dma_load (QEMUFile *f, void *opaque, int version_id)
{
    struct dma_cont *d = (struct dma_cont*)opaque;
    int i;

    /* Only saved-state version 1 is known. */
    if (version_id != 1)
#ifdef VBOX
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
#else
        return -EINVAL;
#endif

    /* status is not part of the saved state, matching dma_save */
    /* qemu_get_8s (f, &d->status); */
    qemu_get_8s (f, &d->command);
    qemu_get_8s (f, &d->mask);
    qemu_get_8s (f, &d->flip_flop);
    qemu_get_be32s (f, &d->dshift);

    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];
        qemu_get_be32s (f, &r->now[0]);
        qemu_get_be32s (f, &r->now[1]);
        qemu_get_be16s (f, &r->base[0]);
        qemu_get_be16s (f, &r->base[1]);
        qemu_get_8s (f, &r->mode);
        qemu_get_8s (f, &r->page);
        qemu_get_8s (f, &r->pageh);
        qemu_get_8s (f, &r->dack);
        qemu_get_8s (f, &r->eop);
    }
    return 0;
}
782
#ifndef VBOX
/**
 * One-time initialization of both controllers for the non-VBox (QEMU) build.
 * NOTE(review): these dma_init2 calls pass five arguments while the
 * definition above takes six (the DMAState pointer was prepended for the
 * VBox port), so this !VBOX path appears bit-rotted -- verify before use.
 */
void DMA_init (int high_page_enable)
{
    dma_init2(&dma_controllers[0], 0x00, 0, 0x80,
              high_page_enable ? 0x480 : -1);
    dma_init2(&dma_controllers[1], 0xc0, 1, 0x88,
              high_page_enable ? 0x488 : -1);
    register_savevm ("dma", 0, 1, dma_save, dma_load, &dma_controllers[0]);
    register_savevm ("dma", 1, 1, dma_save, dma_load, &dma_controllers[1]);
}
#endif
794
795#ifdef VBOX
796static bool run_wrapper (PPDMDEVINS pDevIns)
797{
798 DMA_run (PDMINS2DATA (pDevIns, DMAState *));
799 return 0;
800}
801
802static void register_channel_wrapper (PPDMDEVINS pDevIns,
803 unsigned nchan,
804 PFNDMATRANSFERHANDLER f,
805 void *opaque)
806{
807 DMAState *s = PDMINS2DATA (pDevIns, DMAState *);
808 DMA_register_channel (s, nchan, f, opaque);
809}
810
811static uint32_t rd_mem_wrapper (PPDMDEVINS pDevIns,
812 unsigned nchan,
813 void *buf,
814 uint32_t pos,
815 uint32_t len)
816{
817 DMAState *s = PDMINS2DATA (pDevIns, DMAState *);
818 return DMA_read_memory (s, nchan, buf, pos, len);
819}
820
821static uint32_t wr_mem_wrapper (PPDMDEVINS pDevIns,
822 unsigned nchan,
823 const void *buf,
824 uint32_t pos,
825 uint32_t len)
826{
827 DMAState *s = PDMINS2DATA (pDevIns, DMAState *);
828 return DMA_write_memory (s, nchan, buf, pos, len);
829}
830
831static void set_DREQ_wrapper (PPDMDEVINS pDevIns,
832 unsigned nchan,
833 unsigned level)
834{
835 DMAState *s = PDMINS2DATA (pDevIns, DMAState *);
836 if (level) {
837 DMA_hold_DREQ (s, nchan);
838 }
839 else {
840 DMA_release_DREQ (s, nchan);
841 }
842}
843
844static uint8_t get_mode_wrapper (PPDMDEVINS pDevIns, unsigned nchan)
845{
846 DMAState *s = PDMINS2DATA (pDevIns, DMAState *);
847 return DMA_get_channel_mode (s, nchan);
848}
849
850static void DMAReset (PPDMDEVINS pDevIns)
851{
852 DMAState *s = PDMINS2DATA (pDevIns, DMAState *);
853 dma_reset (&s->dma_controllers[0]);
854 dma_reset (&s->dma_controllers[1]);
855}
856
857static DECLCALLBACK(int) SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSMHandle)
858{
859 DMAState *s = PDMINS2DATA (pDevIns, DMAState *);
860 dma_save (pSSMHandle, &s->dma_controllers[0]);
861 dma_save (pSSMHandle, &s->dma_controllers[1]);
862 return VINF_SUCCESS;
863}
864
865static DECLCALLBACK(int) LoadExec (PPDMDEVINS pDevIns,
866 PSSMHANDLE pSSMHandle,
867 uint32_t u32Version)
868{
869 DMAState *s = PDMINS2DATA (pDevIns, DMAState *);
870
871 if (u32Version != 1) {
872 AssertFailed ();
873 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
874 }
875
876 dma_load (pSSMHandle, &s->dma_controllers[0], u32Version);
877 return dma_load (pSSMHandle, &s->dma_controllers[1], u32Version);
878}
879
880/**
881 * Construct a device instance for a VM.
882 *
883 * @returns VBox status.
884 * @param pDevIns The device instance data.
885 * If the registration structure is needed, pDevIns->pDevReg points to it.
886 * @param iInstance Instance number. Use this to figure out which registers and such to use.
887 * The device number is also found in pDevIns->iInstance, but since it's
888 * likely to be freqently used PDM passes it as parameter.
889 * @param pCfgHandle Configuration node handle for the device. Use this to obtain the configuration
890 * of the device instance. It's also found in pDevIns->pCfgHandle, but like
891 * iInstance it's expected to be used a bit in this function.
892 */
893static DECLCALLBACK(int) DMAConstruct(PPDMDEVINS pDevIns,
894 int iInstance,
895 PCFGMNODE pCfgHandle)
896{
897 DMAState *s = PDMINS2DATA (pDevIns, DMAState *);
898 bool high_page_enable = 0;
899 PDMDMACREG reg;
900 int rc;
901
902 s->pDevIns = pDevIns;
903
904 /*
905 * Validate configuration.
906 */
907 if (!CFGMR3AreValuesValid(pCfgHandle, "\0")) /* "HighPageEnable\0")) */
908 return VERR_PDM_DEVINS_UNKNOWN_CFG_VALUES;
909
910#if 0
911 rc = CFGMR3QueryBool (pCfgHandle, "HighPageEnable", &high_page_enable);
912 if (VBOX_FAILURE (rc)) {
913 return rc;
914 }
915#endif
916
917 dma_init2(s, &s->dma_controllers[0], 0x00, 0, 0x80,
918 high_page_enable ? 0x480 : -1);
919 dma_init2(s, &s->dma_controllers[1], 0xc0, 1, 0x88,
920 high_page_enable ? 0x488 : -1);
921
922 reg.u32Version = PDM_DMACREG_VERSION;
923 reg.pfnRun = run_wrapper;
924 reg.pfnRegister = register_channel_wrapper;
925 reg.pfnReadMemory = rd_mem_wrapper;
926 reg.pfnWriteMemory = wr_mem_wrapper;
927 reg.pfnSetDREQ = set_DREQ_wrapper;
928 reg.pfnGetChannelMode = get_mode_wrapper;
929
930 Assert(pDevIns->pDevHlp->pfnDMARegister);
931 rc = pDevIns->pDevHlp->pfnDMACRegister (pDevIns, &reg, &s->pHlp);
932 if (VBOX_FAILURE (rc)) {
933 return rc;
934 }
935
936 rc = PDMDevHlpSSMRegister (pDevIns, pDevIns->pDevReg->szDeviceName, iInstance, 1, sizeof (*s),
937 NULL, SaveExec, NULL, NULL, LoadExec, NULL);
938 if (VBOX_FAILURE(rc))
939 return rc;
940
941 return VINF_SUCCESS;
942}
943
/**
 * The device registration structure for the 8237A DMA controller pair.
 * Only construct, reset, and the save/load callbacks (registered in
 * DMAConstruct) are implemented; all other hooks are NULL.
 */
const PDMDEVREG g_DeviceDMA =
{
    /* u32Version */
    PDM_DEVREG_VERSION,
    /* szDeviceName */
    "8237A",
    /* szGCMod */
    "",
    /* szR0Mod */
    "",
    /* pszDescription */
    "DMA Controller Device",
    /* fFlags */
    PDM_DEVREG_FLAGS_HOST_BITS_DEFAULT | PDM_DEVREG_FLAGS_GUEST_BITS_DEFAULT,
    /* fClass */
    PDM_DEVREG_CLASS_DMA,
    /* cMaxInstances */
    1,
    /* cbInstance */
    sizeof(DMAState),
    /* pfnConstruct */
    DMAConstruct,
    /* pfnDestruct */
    NULL,
    /* pfnRelocate */
    NULL,
    /* pfnIOCtl */
    NULL,
    /* pfnPowerOn */
    NULL,
    /* pfnReset */
    DMAReset,
    /* pfnSuspend */
    NULL,
    /* pfnResume */
    NULL,
    /* pfnAttach */
    NULL,
    /* pfnDetach */
    NULL,
    /* pfnQueryInterface. */
    NULL,
    /* pfnInitComplete */
    NULL,
    /* pfnPowerOff */
    NULL
};
994#endif /* VBOX */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette