VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/drm/vbox_main.c@ 65992

Last change on this file since 65992 was 65992, checked in by vboxsync, 8 years ago

Additions/linux: Linux 4.11 compile fixes

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.8 KB
Line 
1/* $Id: vbox_main.c 65992 2017-03-08 11:24:53Z vboxsync $ */
2/** @file
3 * VirtualBox Additions Linux kernel video driver
4 */
5
6/*
7 * Copyright (C) 2013-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 * --------------------------------------------------------------------
17 *
18 * This code is based on
19 * ast_main.c
20 * with the following copyright and permission notice:
21 *
22 * Copyright 2012 Red Hat Inc.
23 *
24 * Permission is hereby granted, free of charge, to any person obtaining a
25 * copy of this software and associated documentation files (the
26 * "Software"), to deal in the Software without restriction, including
27 * without limitation the rights to use, copy, modify, merge, publish,
28 * distribute, sub license, and/or sell copies of the Software, and to
29 * permit persons to whom the Software is furnished to do so, subject to
30 * the following conditions:
31 *
32 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
34 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
35 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
36 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
37 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
38 * USE OR OTHER DEALINGS IN THE SOFTWARE.
39 *
40 * The above copyright notice and this permission notice (including the
41 * next paragraph) shall be included in all copies or substantial portions
42 * of the Software.
43 *
44 */
45/*
46 * Authors: Dave Airlie <[email protected]>
47 */
48#include "vbox_drv.h"
49
50#include <VBoxVideoGuest.h>
51#include <VBoxVideoVBE.h>
52
53#include <drm/drm_fb_helper.h>
54#include <drm/drm_crtc_helper.h>
55
56static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
57{
58 struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);
59 if (vbox_fb->obj)
60 drm_gem_object_unreference_unlocked(vbox_fb->obj);
61
62 drm_framebuffer_cleanup(fb);
63 kfree(fb);
64}
65
66void vbox_enable_accel(struct vbox_private *vbox)
67{
68 unsigned i;
69 struct VBVABUFFER *vbva;
70 uint32_t vram_map_offset = vbox->available_vram_size - vbox->vram_map_start;
71
72 if (vbox->vbva_info == NULL) { /* Should never happen... */
73 printk(KERN_ERR "vboxvideo: failed to set up VBVA.\n");
74 return;
75 }
76 for (i = 0; i < vbox->num_crtcs; ++i) {
77 if (vbox->vbva_info[i].pVBVA == NULL) {
78 vbva = (struct VBVABUFFER *) ( ((uint8_t *)vbox->mapped_vram)
79 + vram_map_offset
80 + i * VBVA_MIN_BUFFER_SIZE);
81 if (!VBoxVBVAEnable(&vbox->vbva_info[i], &vbox->submit_info, vbva, i)) {
82 /* very old host or driver error. */
83 printk(KERN_ERR "vboxvideo: VBoxVBVAEnable failed - heap allocation error.\n");
84 return;
85 }
86 }
87 }
88}
89
90void vbox_disable_accel(struct vbox_private *vbox)
91{
92 unsigned i;
93
94 for (i = 0; i < vbox->num_crtcs; ++i)
95 VBoxVBVADisable(&vbox->vbva_info[i], &vbox->submit_info, i);
96}
97
98void vbox_report_caps(struct vbox_private *vbox)
99{
100 uint32_t caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION
101 | VBVACAPS_IRQ
102 | VBVACAPS_USE_VBVA_ONLY;
103 if (vbox->initial_mode_queried)
104 caps |= VBVACAPS_VIDEO_MODE_HINTS;
105 VBoxHGSMISendCapsInfo(&vbox->submit_info, caps);
106}
107
108/** Send information about dirty rectangles to VBVA. If necessary we enable
109 * VBVA first, as this is normally disabled after a change of master in case
110 * the new master does not send dirty rectangle information (is this even
111 * allowed?) */
112void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
113 struct drm_clip_rect *rects,
114 unsigned num_rects)
115{
116 struct vbox_private *vbox = fb->dev->dev_private;
117 struct drm_crtc *crtc;
118 unsigned i;
119
120 mutex_lock(&vbox->hw_mutex);
121 list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
122 if (CRTC_FB(crtc) == fb) {
123 vbox_enable_accel(vbox);
124 for (i = 0; i < num_rects; ++i)
125 {
126 unsigned crtc_id = to_vbox_crtc(crtc)->crtc_id;
127 VBVACMDHDR cmd_hdr;
128
129 if ( rects[i].x1 > crtc->x
130 + crtc->hwmode.hdisplay
131 || rects[i].y1 > crtc->y
132 + crtc->hwmode.vdisplay
133 || rects[i].x2 < crtc->x
134 || rects[i].y2 < crtc->y)
135 continue;
136 cmd_hdr.x = (int16_t)rects[i].x1;
137 cmd_hdr.y = (int16_t)rects[i].y1;
138 cmd_hdr.w = (uint16_t)rects[i].x2 - rects[i].x1;
139 cmd_hdr.h = (uint16_t)rects[i].y2 - rects[i].y1;
140 if (VBoxVBVABufferBeginUpdate(&vbox->vbva_info[crtc_id],
141 &vbox->submit_info))
142 {
143 VBoxVBVAWrite(&vbox->vbva_info[crtc_id], &vbox->submit_info, &cmd_hdr,
144 sizeof(cmd_hdr));
145 VBoxVBVABufferEndUpdate(&vbox->vbva_info[crtc_id]);
146 }
147 }
148 }
149 }
150 mutex_unlock(&vbox->hw_mutex);
151}
152
/** DRM .dirty callback: forward the dirty rectangles reported by user space
 * to the host.  @a flags and @a color are ignored as we do not implement the
 * corresponding DRM dirty-FB options.
 * @returns 0 (always succeeds). */
static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
                                       struct drm_file *file_priv,
                                       unsigned flags, unsigned color,
                                       struct drm_clip_rect *rects,
                                       unsigned num_rects)
{
    vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);
    return 0;
}
162
/** Framebuffer operations used for framebuffers wrapping our GEM objects. */
static const struct drm_framebuffer_funcs vbox_fb_funcs = {
    .destroy = vbox_user_framebuffer_destroy,
    .dirty = vbox_user_framebuffer_dirty,
};
167
168
/** Initialise a vbox_framebuffer: fill in the DRM framebuffer fields from
 * @a mode_cmd, adopt @a obj as the backing GEM object and register the
 * framebuffer with DRM.
 * @note drm_helper_mode_fill_fb_struct() gained a drm_device parameter in
 *       Linux 4.11 and the mode command became const in 4.5, hence the
 *       version checks.
 * @returns 0 on success, or the negative error code from
 *          drm_framebuffer_init() on failure. */
int vbox_framebuffer_init(struct drm_device *dev,
                          struct vbox_framebuffer *vbox_fb,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
                          const
#endif
                          struct DRM_MODE_FB_CMD *mode_cmd,
                          struct drm_gem_object *obj)
{
    int ret;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
    drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd);
#else
    drm_helper_mode_fill_fb_struct(&vbox_fb->base, mode_cmd);
#endif
    vbox_fb->obj = obj;
    ret = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs);
    if (ret) {
        DRM_ERROR("framebuffer init failed %d\n", ret);
        return ret;
    }
    return 0;
}
192
/** DRM .fb_create callback: create a framebuffer backed by the GEM object
 * whose handle user space passed in @a mode_cmd.
 * @note drm_gem_object_lookup() lost its drm_device parameter in Linux 4.7
 *       and the mode command became const in 4.5, hence the version checks.
 * @returns the new framebuffer, or an ERR_PTR() value: -ENOENT for a stale
 *          GEM handle, -ENOMEM on allocation failure, or the error from
 *          vbox_framebuffer_init(). */
static struct drm_framebuffer *
vbox_user_framebuffer_create(struct drm_device *dev,
                             struct drm_file *filp,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
                             const
#endif
                             struct drm_mode_fb_cmd2 *mode_cmd)
{
    struct drm_gem_object *obj;
    struct vbox_framebuffer *vbox_fb;
    int ret;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
    obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
#else
    obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
#endif
    if (obj == NULL)
        return ERR_PTR(-ENOENT);

    vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
    if (!vbox_fb) {
        /* Drop the reference taken by the look-up above. */
        drm_gem_object_unreference_unlocked(obj);
        return ERR_PTR(-ENOMEM);
    }

    ret = vbox_framebuffer_init(dev, vbox_fb, mode_cmd, obj);
    if (ret) {
        drm_gem_object_unreference_unlocked(obj);
        kfree(vbox_fb);
        return ERR_PTR(ret);
    }
    return &vbox_fb->base;
}
227
/** Mode configuration callbacks: we only need framebuffer creation. */
static const struct drm_mode_config_funcs vbox_mode_funcs = {
    .fb_create = vbox_user_framebuffer_create,
};
231
232static void vbox_accel_fini(struct vbox_private *vbox)
233{
234 if (vbox->vbva_info)
235 {
236 vbox_disable_accel(vbox);
237 kfree(vbox->vbva_info);
238 vbox->vbva_info = NULL;
239 }
240}
241
242static int vbox_accel_init(struct vbox_private *vbox)
243{
244 unsigned i;
245 if (!vbox->vbva_info)
246 {
247 vbox->vbva_info = kzalloc( sizeof(struct VBVABUFFERCONTEXT)
248 * vbox->num_crtcs,
249 GFP_KERNEL);
250 if (!vbox->vbva_info)
251 return -ENOMEM;
252 }
253 /* Take a command buffer for each screen from the end of usable VRAM. */
254 vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;
255 for (i = 0; i < vbox->num_crtcs; ++i)
256 VBoxVBVASetupBufferContext(&vbox->vbva_info[i],
257 vbox->available_vram_size + i * VBVA_MIN_BUFFER_SIZE,
258 VBVA_MIN_BUFFER_SIZE);
259 return 0;
260}
261
/** Allocation function for the HGSMI heap and data.
 * @param environ unused opaque environment context
 * @param size    number of bytes to allocate
 * @returns kernel-heap memory, or NULL on failure. */
static DECLCALLBACK(void *) alloc_hgsmi_environ(void *environ, HGSMISIZE size)
{
    NOREF(environ);  /* No per-environment state is needed. */
    return kmalloc(size, GFP_KERNEL);
}
268
269
/** Free function for the HGSMI heap and data.
 * @param environ unused opaque environment context
 * @param ptr     memory previously returned by alloc_hgsmi_environ() */
static DECLCALLBACK(void) free_hgsmi_environ(void *environ, void *ptr)
{
    NOREF(environ);  /* No per-environment state is needed. */
    kfree(ptr);
}
276
277
/** Pointers to the HGSMI heap and data manipulation functions.  The first
 * member is the opaque environment context passed back to the callbacks,
 * which we do not use. */
static HGSMIENV hgsmi_environ =
{
    NULL,
    alloc_hgsmi_environ,
    free_hgsmi_environ
};
285
286
287/** Do we support the 4.3 plus mode hint reporting interface? */
288static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
289{
290 uint32_t have_hints, have_cursor;
291
292 return RT_SUCCESS(VBoxQueryConfHGSMI(&vbox->submit_info, VBOX_VBVA_CONF32_MODE_HINT_REPORTING, &have_hints))
293 && RT_SUCCESS(VBoxQueryConfHGSMI(&vbox->submit_info, VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING, &have_cursor))
294 && have_hints == VINF_SUCCESS
295 && have_cursor == VINF_SUCCESS;
296}
297
/* pci_iomap_range() was only added in Linux 4.0; on older kernels fall back
 * to a plain ioremap() of the requested slice of the BAR. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
# define pci_iomap_range(dev, bar, offset, maxlen) \
    ioremap(pci_resource_start(dev, bar) + offset, maxlen)
#endif
302
/** Set up our heaps and data exchange buffers in VRAM before handing the rest
 * to the memory manager.
 * Layout: VBoxHGSMIGetBaseMappingInfo() reports where the guest heap and the
 * host flags live (near @a base_offset); the per-screen VBVA command buffers
 * are later carved out just below that by vbox_accel_init().  Only VRAM from
 * @a map_start upwards is mapped into the kernel.
 * @returns 0 on success, -ENOMEM on mapping/allocation failure, or -ENOTSUPP
 *          if the host lacks the mode hint interface we require. */
static int vbox_hw_init(struct vbox_private *vbox)
{
    uint32_t base_offset, map_start, guest_heap_offset, guest_heap_size, host_flags_offset;
    void *guest_heap;

    vbox->full_vram_size = VBoxVideoGetVRAMSize();
    vbox->any_pitch = VBoxVideoAnyWidthAllowed();
    DRM_INFO("VRAM %08x\n", vbox->full_vram_size);
    VBoxHGSMIGetBaseMappingInfo(vbox->full_vram_size, &base_offset, NULL,
                                &guest_heap_offset, &guest_heap_size, &host_flags_offset);
    /* Map from below the heap so the (worst-case) VBVA buffers are covered
     * too; the max() against 0 guards against underflow on tiny VRAM. */
    map_start = (uint32_t)max((int)base_offset
                              - VBOX_MAX_SCREENS * VBVA_MIN_BUFFER_SIZE, 0);
    vbox->mapped_vram = pci_iomap_range(vbox->dev->pdev, 0, map_start,
                                        vbox->full_vram_size - map_start);
    if (!vbox->mapped_vram)
        return -ENOMEM;
    vbox->vram_map_start = map_start;
    /* Offsets below are relative to the start of the mapping, hence the
     * "- map_start" corrections. */
    guest_heap = ((uint8_t *)vbox->mapped_vram) + base_offset - map_start
                 + guest_heap_offset;
    vbox->host_flags_offset = base_offset - map_start + host_flags_offset;
    if (RT_FAILURE(VBoxHGSMISetupGuestContext(&vbox->submit_info, guest_heap,
                                              guest_heap_size,
                                              base_offset + guest_heap_offset,
                                              &hgsmi_environ)))
        return -ENOMEM;
    /* Reduce available VRAM size to reflect the guest heap. */
    vbox->available_vram_size = base_offset;
    /* Linux drm represents monitors as a 32-bit array. */
    vbox->num_crtcs = min(VBoxHGSMIGetMonitorCount(&vbox->submit_info),
                          (uint32_t)VBOX_MAX_SCREENS);
    if (!have_hgsmi_mode_hints(vbox))
        return -ENOTSUPP;
    /* num_crtcs is bounded by VBOX_MAX_SCREENS, so this cannot overflow. */
    vbox->last_mode_hints = kzalloc(sizeof(VBVAMODEHINT) * vbox->num_crtcs, GFP_KERNEL);
    if (!vbox->last_mode_hints)
        return -ENOMEM;
    return vbox_accel_init(vbox);
}
342
343static void vbox_hw_fini(struct vbox_private *vbox)
344{
345 vbox_accel_fini(vbox);
346 if (vbox->last_mode_hints)
347 kfree(vbox->last_mode_hints);
348 vbox->last_mode_hints = NULL;
349}
350
/** Main DRM driver load callback: allocate the device-private structure and
 * bring up, in order, the hardware (HGSMI/VBVA), the memory manager, mode
 * configuration, mode setting, interrupts and the fbdev emulation.
 * @note every error path funnels through vbox_driver_unload(), which must
 *       therefore cope with partially initialised state.
 * @returns 0 on success or a negative error code. */
int vbox_driver_load(struct drm_device *dev, unsigned long flags)
{
    struct vbox_private *vbox;
    int ret = 0;

    if (!VBoxHGSMIIsSupported())
        return -ENODEV;
    vbox = kzalloc(sizeof(struct vbox_private), GFP_KERNEL);
    if (!vbox)
        return -ENOMEM;

    dev->dev_private = vbox;
    vbox->dev = dev;

    mutex_init(&vbox->hw_mutex);

    ret = vbox_hw_init(vbox);
    if (ret)
        goto out_free;

    ret = vbox_mm_init(vbox);
    if (ret)
        goto out_free;

    drm_mode_config_init(dev);

    dev->mode_config.funcs = (void *)&vbox_mode_funcs;
    /* 64x64 is the smallest mode we accept; the maxima match the VBE BIOS. */
    dev->mode_config.min_width = 64;
    dev->mode_config.min_height = 64;
    dev->mode_config.preferred_depth = 24;
    dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
    dev->mode_config.max_height = VBE_DISPI_MAX_YRES;

    ret = vbox_mode_init(dev);
    if (ret)
        goto out_free;

    ret = vbox_irq_init(vbox);
    if (ret)
        goto out_free;

    ret = vbox_fbdev_init(dev);
    if (ret)
        goto out_free;
    return 0;
out_free:
    /* Unload handles whatever subset of the above succeeded. */
    vbox_driver_unload(dev);
    return ret;
}
400
/** DRM driver unload callback, also used to clean up after a failed load.
 * Tears everything down in reverse order of vbox_driver_load() and tolerates
 * partially initialised state.
 * @note the callback's return type changed to void in Linux 4.11. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
void vbox_driver_unload(struct drm_device *dev)
#else
int vbox_driver_unload(struct drm_device *dev)
#endif
{
    struct vbox_private *vbox = dev->dev_private;

    vbox_fbdev_fini(dev);
    vbox_irq_fini(vbox);
    vbox_mode_fini(dev);
    /* mode_config.funcs is only set once drm_mode_config_init() has run, so
     * use it to tell whether mode configuration needs cleaning up. */
    if (dev->mode_config.funcs)
        drm_mode_config_cleanup(dev);

    vbox_hw_fini(vbox);
    vbox_mm_fini(vbox);
    if (vbox->mapped_vram)
        pci_iounmap(dev->pdev, vbox->mapped_vram);
    kfree(vbox);
    dev->dev_private = NULL;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
    return 0;
#endif
}
425
/** @note this is described in the DRM framework documentation.  AST does not
 * have it, but we get an oops on driver unload if it is not present.
 * Restores the fbdev console mode when the last DRM client closes.
 * @note drm_fb_helper_restore_fbdev_mode_unlocked(), which takes the mode
 *       setting locks itself, replaced the locked variant in Linux 3.16. */
void vbox_driver_lastclose(struct drm_device *dev)
{
    struct vbox_private *vbox = dev->dev_private;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
    if (vbox->fbdev)
        drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper);
#else
    drm_modeset_lock_all(dev);
    if (vbox->fbdev)
        drm_fb_helper_restore_fbdev_mode(&vbox->fbdev->helper);
    drm_modeset_unlock_all(dev);
#endif
}
442
443int vbox_gem_create(struct drm_device *dev,
444 u32 size, bool iskernel,
445 struct drm_gem_object **obj)
446{
447 struct vbox_bo *vboxbo;
448 int ret;
449
450 *obj = NULL;
451
452 size = roundup(size, PAGE_SIZE);
453 if (size == 0)
454 return -EINVAL;
455
456 ret = vbox_bo_create(dev, size, 0, 0, &vboxbo);
457 if (ret) {
458 if (ret != -ERESTARTSYS)
459 DRM_ERROR("failed to allocate GEM object\n");
460 return ret;
461 }
462 *obj = &vboxbo->gem;
463 return 0;
464}
465
466int vbox_dumb_create(struct drm_file *file,
467 struct drm_device *dev,
468 struct drm_mode_create_dumb *args)
469{
470 int ret;
471 struct drm_gem_object *gobj;
472 u32 handle;
473
474 args->pitch = args->width * ((args->bpp + 7) / 8);
475 args->size = args->pitch * args->height;
476
477 ret = vbox_gem_create(dev, args->size, false,
478 &gobj);
479 if (ret)
480 return ret;
481
482 ret = drm_gem_handle_create(file, gobj, &handle);
483 drm_gem_object_unreference_unlocked(gobj);
484 if (ret)
485 return ret;
486
487 args->handle = handle;
488 return 0;
489}
490
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
/** Dumb-buffer destroy callback.  Kernels from 3.12 on supply a generic
 * implementation, so this wrapper is only needed on older ones. */
int vbox_dumb_destroy(struct drm_file *file,
                      struct drm_device *dev,
                      uint32_t handle)
{
    return drm_gem_handle_delete(file, handle);
}
#endif
499
/** Drop a reference to a vbox buffer object, clearing the caller's pointer
 * when the object is gone. */
static void vbox_bo_unref(struct vbox_bo **bo)
{
    struct ttm_buffer_object *tbo;

    if ((*bo) == NULL)
        return;

    tbo = &((*bo)->bo);
    /* ttm_bo_unref() NULLs its argument; if tbo is NULL afterwards the
     * embedded TTM object (and hence the vbox_bo containing it) is no longer
     * ours, so clear the caller's pointer too.
     * NOTE(review): ttm_bo_unref() appears to NULL its argument
     * unconditionally, in which case the check below is always true -
     * confirm against the TTM version in use. */
    ttm_bo_unref(&tbo);
    if (tbo == NULL)
        *bo = NULL;

}
/** GEM free callback: drop our reference to the underlying buffer object.
 * The vbox_bo itself is released once its last reference goes. */
void vbox_gem_free_object(struct drm_gem_object *obj)
{
    struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);

    vbox_bo_unref(&vbox_bo);
}
519
520
/** Return the fake offset user space must pass to mmap() to map this buffer
 * object.
 * @note the offset moved into the drm_vma_node in Linux 3.12. */
static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
    return bo->bo.addr_space_offset;
#else
    return drm_vma_node_offset_addr(&bo->bo.vma_node);
#endif
}
/** Dumb-buffer map-offset callback: look up the GEM object for @a handle
 * and return the mmap offset of its buffer object in @a offset.
 * @note drm_gem_object_lookup() lost its drm_device parameter in Linux 4.7.
 * @returns 0 on success, -ENOENT if the handle does not resolve. */
int
vbox_dumb_mmap_offset(struct drm_file *file,
                      struct drm_device *dev,
                      uint32_t handle,
                      uint64_t *offset)
{
    struct drm_gem_object *obj;
    int ret;
    struct vbox_bo *bo;

    mutex_lock(&dev->struct_mutex);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
    obj = drm_gem_object_lookup(file, handle);
#else
    obj = drm_gem_object_lookup(dev, file, handle);
#endif
    if (obj == NULL) {
        ret = -ENOENT;
        goto out_unlock;
    }

    bo = gem_to_vbox_bo(obj);
    *offset = vbox_bo_mmap_offset(bo);

    /* Drop the reference taken by the look-up. */
    drm_gem_object_unreference(obj);
    ret = 0;
out_unlock:
    mutex_unlock(&dev->struct_mutex);
    return ret;

}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette