VirtualBox

source: vbox/trunk/src/VBox/Devices/PC/dma.c@ 1

Last change on this file since 1 was 1, checked in by vboxsync, 55 years ago

import

  • Property svn:eol-style set to native
File size: 26.2 KB
Line 
1/** @file
2 *
3 * VBox basic PC devices:
4 * DMA controller
5 */
6
7/*
8 * Copyright (C) 2006 InnoTek Systemberatung GmbH
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License as published by the Free Software Foundation,
14 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
15 * distribution. VirtualBox OSE is distributed in the hope that it will
16 * be useful, but WITHOUT ANY WARRANTY of any kind.
17 *
18 * If you received this file as part of a commercial VirtualBox
19 * distribution, then only the terms of your commercial VirtualBox
20 * license agreement apply instead of the previous paragraph.
21 *
22 * --------------------------------------------------------------------
23 *
24 * This code is based on:
25 *
26 * QEMU DMA emulation
27 *
28 * Copyright (c) 2003 Vassili Karpov (malc)
29 *
30 * Permission is hereby granted, free of charge, to any person obtaining a copy
31 * of this software and associated documentation files (the "Software"), to deal
32 * in the Software without restriction, including without limitation the rights
33 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
34 * copies of the Software, and to permit persons to whom the Software is
35 * furnished to do so, subject to the following conditions:
36 *
37 * The above copyright notice and this permission notice shall be included in
38 * all copies or substantial portions of the Software.
39 *
40 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
41 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
42 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
43 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
44 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
45 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
46 * THE SOFTWARE.
47 */
48
49#ifdef VBOX
50
51/*******************************************************************************
52* Header Files *
53*******************************************************************************/
54#include <VBox/pdm.h>
55#include <VBox/err.h>
56
57#define LOG_GROUP LOG_GROUP_DEFAULT ///@todo LOG_GROUP_DEV_DMA
58#include <VBox/log.h>
59#include <iprt/assert.h>
60#include <iprt/uuid.h>
61#include <iprt/string.h>
62
63#include <stdio.h>
64#include <stdlib.h>
65
66#include "Builtins.h"
67#include "../vl_vbox.h"
68typedef PFNDMATRANSFERHANDLER DMA_transfer_handler;
69
70#else /* !VBOX */
71#include "vl.h"
72#endif
73
74/* #define DEBUG_DMA */
75
76#ifndef VBOX
77#ifndef __WIN32__
78#define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__)
79#ifdef DEBUG_DMA
80#define lwarn(...) fprintf (stderr, "dma: " __VA_ARGS__)
81#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
82#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
83#else
84#define lwarn(...)
85#define linfo(...)
86#define ldebug(...)
87#endif
88#else
89#define dolog()
90#define lwarn()
91#define linfo()
92#define ldebug()
93#endif
94#else /* VBOX */
95
96#ifdef LOG_ENABLED
97#endif
98# ifdef LOG_ENABLED
99# define DEBUG_DMA
100 static void DMA_DPRINTF (const char *fmt, ...)
101 {
102 if (LogIsEnabled ()) {
103 va_list args;
104 va_start (args, fmt);
105 RTLogLogger (NULL, NULL, "dma: %N", fmt, &args); /* %N - nested va_list * type formatting call. */
106 va_end (args);
107 }
108 }
109# else
110 DECLINLINE(void) DMA_DPRINTF(const char *pszFmt, ...) {}
111# endif
112
113#define dolog DMA_DPRINTF
114#define lwarn DMA_DPRINTF
115#define linfo DMA_DPRINTF
116#define ldebug DMA_DPRINTF
117
118#endif /* VBOX */
119
120#define LENOFA(a) ((int) (sizeof(a)/sizeof(a[0])))
121
/** Per-channel state of one 8237 DMA channel. */
struct dma_regs {
    /* Working values: now[ADDR] = current address, now[COUNT] = bytes transferred. */
    unsigned int now[2];
    /* Programmed values: base[ADDR] = start address, base[COUNT] = terminal count. */
    uint16_t base[2];
    uint8_t mode;       /* mode register: bit 5 = address decrement, bits 6-7 = op mode */
    uint8_t page;       /* page register: physical address bits 16-23 */
    uint8_t pageh;      /* high page register: physical address bits 24-30 */
    uint8_t dack;       /* DACK state; only read/written by the saved-state code here */
    uint8_t eop;        /* EOP state; only read/written by the saved-state code here */
    DMA_transfer_handler transfer_handler;  /* device callback that moves the data */
    void *opaque;       /* user argument passed back to transfer_handler */
};
133
#define ADDR 0   /* index into dma_regs::now/base: address element */
#define COUNT 1  /* index into dma_regs::now/base: count element */

/** State of one 8237 DMA controller (four channels). */
struct dma_cont {
    uint8_t status;     /* bits 0-3: terminal count (cleared on read), bits 4-7: DREQ pending */
    uint8_t command;    /* command register; writes with unsupported bits are rejected */
    uint8_t mask;       /* per-channel mask bits; 1 = channel masked (inactive) */
    uint8_t flip_flop;  /* byte pointer flip-flop for 16-bit address/count access */
    unsigned int dshift;/* port/size shift: 0 = 8-bit controller, 1 = 16-bit controller */
    struct dma_regs regs[4];
};
145
/** Device instance data: the two cascaded controllers of a PC. */
typedef struct {
    PPDMDEVINS pDevIns;   /* owning PDM device instance */
    PCPDMDMACHLP pHlp;    /* DMAC helpers returned by PDM at registration */
    /* [0] = 8-bit controller (channels 0-3), [1] = 16-bit controller (channels 4-7). */
    struct dma_cont dma_controllers[2];
} DMAState;
151
/* Command register bits.  Everything except controller disable
   (CMD_BLOCK_CONTROLLER) is rejected by write_cont(). */
enum {
    CMD_MEMORY_TO_MEMORY = 0x01,
    CMD_FIXED_ADDRESS    = 0x02,
    CMD_BLOCK_CONTROLLER = 0x04,
    CMD_COMPRESSED_TIME  = 0x08,
    CMD_CYCLIC_PRIORITY  = 0x10,
    CMD_EXTENDED_WRITE   = 0x20,
    CMD_LOW_DREQ         = 0x40,
    CMD_LOW_DACK         = 0x80,
    /* Mask of all command features this emulation does not implement. */
    CMD_NOT_SUPPORTED = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
    | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
    | CMD_LOW_DREQ | CMD_LOW_DACK

};
166
/* Maps the low 3 bits of a page-register port to a channel number; -1 = unmapped. */
static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};
168
169static void write_page (void *opaque, uint32_t nport, uint32_t data)
170{
171 struct dma_cont *d = opaque;
172 int ichan;
173
174 ichan = channels[nport & 7];
175 if (-1 == ichan) {
176 dolog ("invalid channel %#x %#x\n", nport, data);
177 return;
178 }
179 d->regs[ichan].page = data;
180}
181
182static void write_pageh (void *opaque, uint32_t nport, uint32_t data)
183{
184 struct dma_cont *d = opaque;
185 int ichan;
186
187 ichan = channels[nport & 7];
188 if (-1 == ichan) {
189 dolog ("invalid channel %#x %#x\n", nport, data);
190 return;
191 }
192 d->regs[ichan].pageh = data;
193}
194
195static uint32_t read_page (void *opaque, uint32_t nport)
196{
197 struct dma_cont *d = opaque;
198 int ichan;
199
200 ichan = channels[nport & 7];
201 if (-1 == ichan) {
202 dolog ("invalid channel read %#x\n", nport);
203 return 0;
204 }
205 return d->regs[ichan].page;
206}
207
208static uint32_t read_pageh (void *opaque, uint32_t nport)
209{
210 struct dma_cont *d = opaque;
211 int ichan;
212
213 ichan = channels[nport & 7];
214 if (-1 == ichan) {
215 dolog ("invalid channel read %#x\n", nport);
216 return 0;
217 }
218 return d->regs[ichan].pageh;
219}
220
221static inline void init_chan (struct dma_cont *d, int ichan)
222{
223 struct dma_regs *r;
224
225 r = d->regs + ichan;
226 r->now[ADDR] = r->base[ADDR] << d->dshift;
227 r->now[COUNT] = 0;
228}
229
230static inline int getff (struct dma_cont *d)
231{
232 int ff;
233
234 ff = d->flip_flop;
235 d->flip_flop = !ff;
236 return ff;
237}
238
/**
 * Read one byte of a channel's current-address or remaining-count register.
 * Even register indices are the address, odd ones the count; the flip-flop
 * selects which byte of the 16-bit value is returned.
 */
static uint32_t read_chan (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan, nreg, iport, ff, val, dir;
    struct dma_regs *r;

    /* Normalize the port: the 16-bit controller's ports are word-spaced. */
    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;

    /* Mode bit 5 set means the address decrements during the transfer. */
    dir = ((r->mode >> 5) & 1) ? -1 : 1;
    ff = getff (d);
    if (nreg)
        /* Remaining count = programmed length minus progress so far. */
        val = (r->base[COUNT] << d->dshift) - r->now[COUNT];
    else
        /* Current address = start plus (or minus) progress so far. */
        val = r->now[ADDR] + r->now[COUNT] * dir;

    ldebug ("read_chan %#x -> %d\n", iport, val);
    /* ff selects low (0) or high (1) byte; dshift rescales for 16-bit mode. */
    return (val >> (d->dshift + (ff << 3))) & 0xff;
}
260
261static void write_chan (void *opaque, uint32_t nport, uint32_t data)
262{
263 struct dma_cont *d = opaque;
264 int iport, ichan, nreg;
265 struct dma_regs *r;
266
267 iport = (nport >> d->dshift) & 0x0f;
268 ichan = iport >> 1;
269 nreg = iport & 1;
270 r = d->regs + ichan;
271 if (getff (d)) {
272 r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00);
273 init_chan (d, ichan);
274 } else {
275 r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff);
276 }
277}
278
/**
 * Write to one of the controller-level registers (command, request, mask,
 * mode, clear ports; iport 0x08-0x0f after removing the dshift scaling).
 */
static void write_cont (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan = 0;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08: /* command */
        /* Refuse commands that use features this emulation lacks. */
        if ((data != 0) && (data & CMD_NOT_SUPPORTED)) {
            dolog ("command %#x not supported\n", data);
            return;
        }
        d->command = data;
        break;

    case 0x09: /* software request */
        ichan = data & 3;
        if (data & 4) {
            /* Set request: raises the channel's DREQ status bit. */
            d->status |= 1 << (ichan + 4);
        }
        else {
            d->status &= ~(1 << (ichan + 4));
        }
        /* Writing the request register clears the channel's TC bit. */
        d->status &= ~(1 << ichan);
        break;

    case 0x0a: /* single mask */
        if (data & 4)
            d->mask |= 1 << (data & 3);
        else
            d->mask &= ~(1 << (data & 3));
        break;

    case 0x0b: /* mode */
    {
        ichan = data & 3;
#ifdef DEBUG_DMA
        {
            int op, ai, dir, opmode;
            op = (data >> 2) & 3;
            ai = (data >> 4) & 1;
            dir = (data >> 5) & 1;
            opmode = (data >> 6) & 3;

            linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
                   ichan, op, ai, dir, opmode);
        }
#endif
        d->regs[ichan].mode = data;
        break;
    }

    case 0x0c: /* clear flip flop */
        d->flip_flop = 0;
        break;

    case 0x0d: /* reset (master clear): all channels masked, state cleared */
        d->flip_flop = 0;
        d->mask = ~0;
        d->status = 0;
        d->command = 0;
        break;

    case 0x0e: /* clear mask for all channels */
        d->mask = 0;
        break;

    case 0x0f: /* write mask for all channels */
        d->mask = data;
        break;

    default:
        dolog ("unknown iport %#x\n", iport);
        break;
    }

#ifdef DEBUG_DMA
    if (0xc != iport) {
        linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n",
               nport, ichan, data);
    }
#endif
}
362
363static uint32_t read_cont (void *opaque, uint32_t nport)
364{
365 struct dma_cont *d = opaque;
366 int iport, val;
367
368 iport = (nport >> d->dshift) & 0x0f;
369 switch (iport) {
370 case 0x08: /* status */
371 val = d->status;
372 d->status &= 0xf0;
373 break;
374 case 0x0f: /* mask */
375 val = d->mask;
376 break;
377 default:
378 val = 0;
379 break;
380 }
381
382 ldebug ("read_cont: nport %#06x, iport %#04x val %#x\n", nport, iport, val);
383 return val;
384}
385
386static uint8_t DMA_get_channel_mode (DMAState *s, int nchan)
387{
388 return s->dma_controllers[nchan > 3].regs[nchan & 3].mode;
389}
390
391static void DMA_hold_DREQ (DMAState *s, int nchan)
392{
393 int ncont, ichan;
394
395 ncont = nchan > 3;
396 ichan = nchan & 3;
397 linfo ("held cont=%d chan=%d\n", ncont, ichan);
398 s->dma_controllers[ncont].status |= 1 << (ichan + 4);
399}
400
401static void DMA_release_DREQ (DMAState *s, int nchan)
402{
403 int ncont, ichan;
404
405 ncont = nchan > 3;
406 ichan = nchan & 3;
407 linfo ("released cont=%d chan=%d\n", ncont, ichan);
408 s->dma_controllers[ncont].status &= ~(1 << (ichan + 4));
409}
410
411static void channel_run (DMAState *s, int ncont, int ichan)
412{
413 int n;
414 struct dma_regs *r = &s->dma_controllers[ncont].regs[ichan];
415#ifdef DEBUG_DMA
416 int dir, opmode;
417
418 dir = (r->mode >> 5) & 1;
419 opmode = (r->mode >> 6) & 3;
420
421 if (dir) {
422 dolog ("DMA in address decrement mode\n");
423 }
424 if (opmode != 1) {
425 dolog ("DMA not in single mode select %#x\n", opmode);
426 }
427#endif
428
429 r = s->dma_controllers[ncont].regs + ichan;
430 n = r->transfer_handler (s->pDevIns, r->opaque, ichan + (ncont << 2),
431 r->now[COUNT], (r->base[COUNT] + 1) << ncont);
432 r->now[COUNT] = n;
433 ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
434}
435
436static void DMA_run (DMAState *s)
437{
438 struct dma_cont *d;
439 int icont, ichan;
440
441 d = s->dma_controllers;
442
443 for (icont = 0; icont < 2; icont++, d++) {
444 for (ichan = 0; ichan < 4; ichan++) {
445 int mask;
446
447 mask = 1 << ichan;
448
449 if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4))))
450 channel_run (s, icont, ichan);
451 }
452 }
453}
454
455static void DMA_register_channel (DMAState *s, unsigned nchan,
456 DMA_transfer_handler transfer_handler,
457 void *opaque)
458{
459 struct dma_regs *r;
460 int ichan, ncont;
461 LogFlow (("DMA_register_channel: s=%p nchan=%d transfer_handler=%p opaque=%p\n",
462 s, nchan, transfer_handler, opaque));
463
464 ncont = nchan > 3;
465 ichan = nchan & 3;
466
467 r = s->dma_controllers[ncont].regs + ichan;
468 r->transfer_handler = transfer_handler;
469 r->opaque = opaque;
470}
471
472static uint32_t DMA_read_memory (DMAState *s,
473 unsigned nchan,
474 void *buf,
475 uint32_t pos,
476 uint32_t len)
477{
478 struct dma_regs *r = &s->dma_controllers[nchan > 3].regs[nchan & 3];
479 uint32_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];
480
481 if (r->mode & 0x20) {
482 int i;
483 uint8_t *p = buf;
484
485#ifdef VBOX
486 PDMDevHlpPhysRead (s->pDevIns, addr - pos - len, buf, len);
487#else
488 cpu_physical_memory_read (addr - pos - len, buf, len);
489#endif
490 /* What about 16bit transfers? */
491 for (i = 0; i < len >> 1; i++) {
492 uint8_t b = p[len - i - 1];
493 p[i] = b;
494 }
495 }
496 else
497#ifdef VBOX
498 PDMDevHlpPhysRead (s->pDevIns, addr + pos, buf, len);
499#else
500 cpu_physical_memory_read (addr + pos, buf, len);
501#endif
502 return len;
503}
504
505static uint32_t DMA_write_memory (DMAState *s,
506 unsigned nchan,
507 const void *buf,
508 uint32_t pos,
509 uint32_t len)
510{
511 struct dma_regs *r = &s->dma_controllers[nchan > 3].regs[nchan & 3];
512 uint32_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];
513
514 if (r->mode & 0x20) {
515 int i;
516 uint8_t *p = (uint8_t *) buf;
517
518#ifdef VBOX
519 PDMDevHlpPhysWrite (s->pDevIns, addr - pos - len, buf, len);
520#else
521 cpu_physical_memory_write (addr - pos - len, buf, len);
522#endif
523 /* What about 16bit transfers? */
524 for (i = 0; i < len; i++) {
525 uint8_t b = p[len - i - 1];
526 p[i] = b;
527 }
528 }
529 else
530#ifdef VBOX
531 PDMDevHlpPhysWrite (s->pDevIns, addr + pos, buf, len);
532#else
533 cpu_physical_memory_write (addr + pos, buf, len);
534#endif
535
536 return len;
537}
538
539
#ifndef VBOX
/* request the emulator to transfer a new DMA memory block ASAP */
void DMA_schedule(int nchan)
{
    /* Kick the CPU out of its execution loop so the DMA poll runs soon. */
    cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
}
#endif
547
548static void dma_reset(void *opaque)
549{
550 struct dma_cont *d = opaque;
551 write_cont (d, (0x0d << d->dshift), 0);
552}
553
#ifdef VBOX
/* Expand to the PDM I/O-port callback signatures so the handlers below
   read like their QEMU counterparts.  pvUser carries the dma_cont *. */
#define IO_READ_PROTO(n) \
static DECLCALLBACK(int) io_read_##n (PPDMDEVINS pDevIns, \
                                      void *pvUser, \
                                      RTIOPORT Port, \
                                      uint32_t *pu32, \
                                      unsigned cb)


#define IO_WRITE_PROTO(n) \
static DECLCALLBACK(int) io_write_##n (PPDMDEVINS pDevIns, \
                                       void *pvUser, \
                                       RTIOPORT Port, \
                                       uint32_t u32, \
                                       unsigned cb)
569
570IO_WRITE_PROTO (chan)
571{
572 if (cb == 1) {
573 write_chan (pvUser, Port, u32);
574 }
575#ifdef PARANOID
576 else {
577 Log (("Unknown write to %#x of size %d, value %#x\n",
578 Port, cb, u32));
579 }
580#endif
581 return VINF_SUCCESS;
582}
583
584IO_WRITE_PROTO (page)
585{
586 if (cb == 1) {
587 write_page (pvUser, Port, u32);
588 }
589#ifdef PARANOID
590 else {
591 Log (("Unknown write to %#x of size %d, value %#x\n",
592 Port, cb, u32));
593 }
594#endif
595 return VINF_SUCCESS;
596}
597
598IO_WRITE_PROTO (pageh)
599{
600 if (cb == 1) {
601 write_pageh (pvUser, Port, u32);
602 }
603#ifdef PARANOID
604 else {
605 Log (("Unknown write to %#x of size %d, value %#x\n",
606 Port, cb, u32));
607 }
608#endif
609 return VINF_SUCCESS;
610}
611
612IO_WRITE_PROTO (cont)
613{
614 if (cb == 1) {
615 write_cont (pvUser, Port, u32);
616 }
617#ifdef PARANOID
618 else {
619 Log (("Unknown write to %#x of size %d, value %#x\n",
620 Port, cb, u32));
621 }
622#endif
623 return VINF_SUCCESS;
624}
625
626IO_READ_PROTO (chan)
627{
628 if (cb == 1) {
629 *pu32 = read_chan (pvUser, Port);
630 return VINF_SUCCESS;
631 }
632 else {
633 return VERR_IOM_IOPORT_UNUSED;
634 }
635}
636
637IO_READ_PROTO (page)
638{
639 if (cb == 1) {
640 *pu32 = read_page (pvUser, Port);
641 return VINF_SUCCESS;
642 }
643 else {
644 return VERR_IOM_IOPORT_UNUSED;
645 }
646}
647
648IO_READ_PROTO (pageh)
649{
650 if (cb == 1) {
651 *pu32 = read_pageh (pvUser, Port);
652 return VINF_SUCCESS;
653 }
654 else {
655 return VERR_IOM_IOPORT_UNUSED;
656 }
657}
658
659IO_READ_PROTO (cont)
660{
661 if (cb == 1) {
662 *pu32 = read_cont (pvUser, Port);
663 return VINF_SUCCESS;
664 }
665 else {
666 return VERR_IOM_IOPORT_UNUSED;
667 }
668}
669#endif
670
671/* dshift = 0: 8 bit DMA, 1 = 16 bit DMA */
672static void dma_init2(DMAState *s, struct dma_cont *d, int base, int dshift,
673 int page_base, int pageh_base)
674{
675 const static int page_port_list[] = { 0x1, 0x2, 0x3, 0x7 };
676 int i;
677
678 d->dshift = dshift;
679 for (i = 0; i < 8; i++) {
680#ifdef VBOX
681 PDMDevHlpIOPortRegister (s->pDevIns, base + (i << dshift), 1, d,
682 io_write_chan, io_read_chan, NULL, NULL, "DMA");
683#else
684 register_ioport_write (base + (i << dshift), 1, 1, write_chan, d);
685 register_ioport_read (base + (i << dshift), 1, 1, read_chan, d);
686#endif
687 }
688 for (i = 0; i < LENOFA (page_port_list); i++) {
689#ifdef VBOX
690 PDMDevHlpIOPortRegister (s->pDevIns, page_base + page_port_list[i], 1, d,
691 io_write_page, io_read_page, NULL, NULL, "DMA Page");
692#else
693 register_ioport_write (page_base + page_port_list[i], 1, 1,
694 write_page, d);
695 register_ioport_read (page_base + page_port_list[i], 1, 1,
696 read_page, d);
697#endif
698 if (pageh_base >= 0) {
699#ifdef VBOX
700 PDMDevHlpIOPortRegister (s->pDevIns, pageh_base + page_port_list[i], 1, d,
701 io_write_pageh, io_read_pageh, NULL, NULL, "DMA Page High");
702#else
703 register_ioport_write (pageh_base + page_port_list[i], 1, 1,
704 write_pageh, d);
705 register_ioport_read (pageh_base + page_port_list[i], 1, 1,
706 read_pageh, d);
707#endif
708 }
709 }
710 for (i = 0; i < 8; i++) {
711#ifdef VBOX
712 PDMDevHlpIOPortRegister (s->pDevIns, base + ((i + 8) << dshift), 1, d,
713 io_write_cont, io_read_cont, NULL, NULL, "DMA cont");
714#else
715 register_ioport_write (base + ((i + 8) << dshift), 1, 1,
716 write_cont, d);
717 register_ioport_read (base + ((i + 8) << dshift), 1, 1,
718 read_cont, d);
719#endif
720 }
721#ifndef VBOX
722 qemu_register_reset(dma_reset, d);
723#endif
724 dma_reset(d);
725}
726
/**
 * Serialize one controller's state (saved-state version 1 layout).
 * The status register is deliberately skipped -- keep in sync with
 * dma_load().
 */
static void dma_save (QEMUFile *f, void *opaque)
{
    struct dma_cont *d = opaque;
    int i;

    /* qemu_put_8s (f, &d->status); */
    qemu_put_8s (f, &d->command);
    qemu_put_8s (f, &d->mask);
    qemu_put_8s (f, &d->flip_flop);
    qemu_put_be32s (f, &d->dshift);

    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];
        qemu_put_be32s (f, &r->now[0]);
        qemu_put_be32s (f, &r->now[1]);
        qemu_put_be16s (f, &r->base[0]);
        qemu_put_be16s (f, &r->base[1]);
        qemu_put_8s (f, &r->mode);
        qemu_put_8s (f, &r->page);
        qemu_put_8s (f, &r->pageh);
        qemu_put_8s (f, &r->dack);
        qemu_put_8s (f, &r->eop);
    }
}
751
/**
 * Restore one controller's state.  Only saved-state version 1 is accepted;
 * the field order must mirror dma_save() exactly.
 */
static int dma_load (QEMUFile *f, void *opaque, int version_id)
{
    struct dma_cont *d = opaque;
    int i;

    if (version_id != 1)
#ifdef VBOX
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
#else
        return -EINVAL;
#endif

    /* qemu_get_8s (f, &d->status); */
    qemu_get_8s (f, &d->command);
    qemu_get_8s (f, &d->mask);
    qemu_get_8s (f, &d->flip_flop);
    qemu_get_be32s (f, &d->dshift);

    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];
        qemu_get_be32s (f, &r->now[0]);
        qemu_get_be32s (f, &r->now[1]);
        qemu_get_be16s (f, &r->base[0]);
        qemu_get_be16s (f, &r->base[1]);
        qemu_get_8s (f, &r->mode);
        qemu_get_8s (f, &r->page);
        qemu_get_8s (f, &r->pageh);
        qemu_get_8s (f, &r->dack);
        qemu_get_8s (f, &r->eop);
    }
    return 0;
}
784
#ifndef VBOX
/* Initialize both PC DMA controllers and register their saved-state
   handlers (QEMU build only). */
void DMA_init (int high_page_enable)
{
    /* NOTE(review): these calls pass 5 arguments while dma_init2() in this
       file takes 6 (DMAState *s first); this !VBOX path looks stale and
       would not compile as-is -- verify against the original QEMU tree. */
    dma_init2(&dma_controllers[0], 0x00, 0, 0x80,
              high_page_enable ? 0x480 : -1);
    dma_init2(&dma_controllers[1], 0xc0, 1, 0x88,
              high_page_enable ? 0x488 : -1);
    register_savevm ("dma", 0, 1, dma_save, dma_load, &dma_controllers[0]);
    register_savevm ("dma", 1, 1, dma_save, dma_load, &dma_controllers[1]);
}
#endif
796
797#ifdef VBOX
798static bool run_wrapper (PPDMDEVINS pDevIns)
799{
800 DMA_run (PDMINS2DATA (pDevIns, DMAState *));
801 return 0;
802}
803
804static void register_channel_wrapper (PPDMDEVINS pDevIns,
805 unsigned nchan,
806 PFNDMATRANSFERHANDLER f,
807 void *opaque)
808{
809 DMAState *s = PDMINS2DATA (pDevIns, DMAState *);
810 DMA_register_channel (s, nchan, f, opaque);
811}
812
813static uint32_t rd_mem_wrapper (PPDMDEVINS pDevIns,
814 unsigned nchan,
815 void *buf,
816 uint32_t pos,
817 uint32_t len)
818{
819 DMAState *s = PDMINS2DATA (pDevIns, DMAState *);
820 return DMA_read_memory (s, nchan, buf, pos, len);
821}
822
823static uint32_t wr_mem_wrapper (PPDMDEVINS pDevIns,
824 unsigned nchan,
825 const void *buf,
826 uint32_t pos,
827 uint32_t len)
828{
829 DMAState *s = PDMINS2DATA (pDevIns, DMAState *);
830 return DMA_write_memory (s, nchan, buf, pos, len);
831}
832
833static void set_DREQ_wrapper (PPDMDEVINS pDevIns,
834 unsigned nchan,
835 unsigned level)
836{
837 DMAState *s = PDMINS2DATA (pDevIns, DMAState *);
838 if (level) {
839 DMA_hold_DREQ (s, nchan);
840 }
841 else {
842 DMA_release_DREQ (s, nchan);
843 }
844}
845
846static uint8_t get_mode_wrapper (PPDMDEVINS pDevIns, unsigned nchan)
847{
848 DMAState *s = PDMINS2DATA (pDevIns, DMAState *);
849 return DMA_get_channel_mode (s, nchan);
850}
851
852static void DMAReset (PPDMDEVINS pDevIns)
853{
854 DMAState *s = PDMINS2DATA (pDevIns, DMAState *);
855 dma_reset (&s->dma_controllers[0]);
856 dma_reset (&s->dma_controllers[1]);
857}
858
859static DECLCALLBACK(int) SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSMHandle)
860{
861 DMAState *s = PDMINS2DATA (pDevIns, DMAState *);
862 dma_save (pSSMHandle, &s->dma_controllers[0]);
863 dma_save (pSSMHandle, &s->dma_controllers[1]);
864 return VINF_SUCCESS;
865}
866
867static DECLCALLBACK(int) LoadExec (PPDMDEVINS pDevIns,
868 PSSMHANDLE pSSMHandle,
869 uint32_t u32Version)
870{
871 DMAState *s = PDMINS2DATA (pDevIns, DMAState *);
872
873 if (u32Version != 1) {
874 AssertFailed ();
875 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
876 }
877
878 dma_load (pSSMHandle, &s->dma_controllers[0], u32Version);
879 return dma_load (pSSMHandle, &s->dma_controllers[1], u32Version);
880}
881
882/**
883 * Construct a device instance for a VM.
884 *
885 * @returns VBox status.
886 * @param pDevIns The device instance data.
887 * If the registration structure is needed, pDevIns->pDevReg points to it.
888 * @param iInstance Instance number. Use this to figure out which registers and such to use.
889 * The device number is also found in pDevIns->iInstance, but since it's
890 * likely to be freqently used PDM passes it as parameter.
891 * @param pCfgHandle Configuration node handle for the device. Use this to obtain the configuration
892 * of the device instance. It's also found in pDevIns->pCfgHandle, but like
893 * iInstance it's expected to be used a bit in this function.
894 */
895static DECLCALLBACK(int) DMAConstruct(PPDMDEVINS pDevIns,
896 int iInstance,
897 PCFGMNODE pCfgHandle)
898{
899 DMAState *s = PDMINS2DATA (pDevIns, DMAState *);
900 bool high_page_enable = 0;
901 PDMDMACREG reg;
902 int rc;
903
904 s->pDevIns = pDevIns;
905
906 /*
907 * Validate configuration.
908 */
909 if (!CFGMR3AreValuesValid(pCfgHandle, "\0")) /* "HighPageEnable\0")) */
910 return VERR_PDM_DEVINS_UNKNOWN_CFG_VALUES;
911
912#if 0
913 rc = CFGMR3QueryBool (pCfgHandle, "HighPageEnable", &high_page_enable);
914 if (VBOX_FAILURE (rc)) {
915 return rc;
916 }
917#endif
918
919 dma_init2(s, &s->dma_controllers[0], 0x00, 0, 0x80,
920 high_page_enable ? 0x480 : -1);
921 dma_init2(s, &s->dma_controllers[1], 0xc0, 1, 0x88,
922 high_page_enable ? 0x488 : -1);
923
924 reg.u32Version = PDM_DMACREG_VERSION;
925 reg.pfnRun = run_wrapper;
926 reg.pfnRegister = register_channel_wrapper;
927 reg.pfnReadMemory = rd_mem_wrapper;
928 reg.pfnWriteMemory = wr_mem_wrapper;
929 reg.pfnSetDREQ = set_DREQ_wrapper;
930 reg.pfnGetChannelMode = get_mode_wrapper;
931
932 Assert(pDevIns->pDevHlp->pfnDMARegister);
933 rc = pDevIns->pDevHlp->pfnDMACRegister (pDevIns, &reg, &s->pHlp);
934 if (VBOX_FAILURE (rc)) {
935 return rc;
936 }
937
938 rc = PDMDevHlpSSMRegister (pDevIns, pDevIns->pDevReg->szDeviceName, iInstance, 1, sizeof (*s),
939 NULL, SaveExec, NULL, NULL, LoadExec, NULL);
940 if (VBOX_FAILURE(rc))
941 return rc;
942
943 return VINF_SUCCESS;
944}
945
/**
 * The device registration structure.
 * Single-instance device; DMAReset master-clears both controllers on VM reset.
 */
const PDMDEVREG g_DeviceDMA =
{
    /* u32Version */
    PDM_DEVREG_VERSION,
    /* szDeviceName */
    "8237A",
    /* szGCMod */
    "",
    /* szR0Mod */
    "",
    /* pszDescription */
    "DMA Controller.\n",
    /* fFlags */
    PDM_DEVREG_FLAGS_HOST_BITS_DEFAULT | PDM_DEVREG_FLAGS_GUEST_BITS_DEFAULT,
    /* fClass */
    PDM_DEVREG_CLASS_DMA,
    /* cMaxInstances */
    1,
    /* cbInstance */
    sizeof(DMAState),
    /* pfnConstruct */
    DMAConstruct,
    /* pfnDestruct */
    NULL,
    /* pfnRelocate */
    NULL,
    /* pfnIOCtl */
    NULL,
    /* pfnPowerOn */
    NULL,
    /* pfnReset */
    DMAReset,
    /* pfnSuspend */
    NULL,
    /* pfnResume */
    NULL,
    /* pfnAttach */
    NULL,
    /* pfnDetach */
    NULL,
    /* pfnQueryInterface. */
    NULL,
    /* pfnInitComplete */
    NULL,
    /* pfnPowerOff */
    NULL
};
996#endif /* VBOX */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette