VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/drm/vbox_ttm.c@67175

Last change on this file since 67175 was 67174, checked in by vboxsync, 8 years ago

bugref:8524: Additions/linux: play nicely with distribution-installed Additions
The vboxvideo driver causes a NULL pointer dereference inside the kernel on
kernel 4.10+ when TTM needs to make room in video memory and tries to
evict buffers.

This is caused by upstream kernel commit a2ab19fed9d1 ("drm/ttm: make
eviction decision a driver callback v2") introducing a new
eviction_valuable callback which the vboxvideo driver does not define:
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit?id=a2ab19fed9d1dc5a7a2ced44f4b289885c522a8f

This commit adds a definition for this callback, fixing the crash.

Signed-off-by: Hans de Goede <hdegoede@…>

Further updated to add a definition for the io_mem_pfn callback, added in
upstream kernel commit ea642c32 ("drm/ttm: add io_mem_pfn callback").
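
In outline, the fix wires TTM's stock helpers into the driver's struct ttm_bo_driver behind kernel-version guards; the fragment below mirrors the vbox_bo_driver initialiser in the listing further down:

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
        .eviction_valuable = ttm_bo_eviction_valuable,  /* default eviction policy */
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
        .io_mem_pfn = ttm_bo_default_io_mem_pfn,        /* default fault-address lookup */
#endif

Using the TTM-provided defaults keeps eviction and io-memory fault behaviour as the core assumed before these callbacks were introduced, so no vbox-specific logic is needed.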

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 13.8 KB
/* $Id: vbox_ttm.c 67174 2017-05-31 14:05:16Z vboxsync $ */
/** @file
 * VirtualBox Additions Linux kernel video driver
 */

/*
 * Copyright (C) 2013-2017 Oracle Corporation
 * This file is based on ast_ttm.c
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 * Authors: Dave Airlie <[email protected]>
 *          Michael Thayer <[email protected]>
 */
#include "vbox_drv.h"
#include <ttm/ttm_page_alloc.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
# define PLACEMENT_FLAGS(placement) (placement)
#else
# define PLACEMENT_FLAGS(placement) (placement).flags
#endif

static inline struct vbox_private *
vbox_bdev(struct ttm_bo_device *bd)
{
        return container_of(bd, struct vbox_private, ttm.bdev);
}

static int
vbox_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void
vbox_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

/**
 * Adds the vbox memory manager object/structures to the global memory manager.
 */
static int vbox_ttm_global_init(struct vbox_private *vbox)
{
        struct drm_global_reference *global_ref;
        int r;

        global_ref = &vbox->ttm.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &vbox_ttm_mem_global_init;
        global_ref->release = &vbox_ttm_mem_global_release;
        r = drm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
                return r;
        }

        vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
        global_ref = &vbox->ttm.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;
        r = drm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM BO subsystem.\n");
                drm_global_item_unref(&vbox->ttm.mem_global_ref);
                return r;
        }
        return 0;
}

/**
 * Removes the vbox memory manager object from the global memory manager.
 */
static void
vbox_ttm_global_release(struct vbox_private *vbox)
{
        if (vbox->ttm.mem_global_ref.release == NULL)
                return;

        drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
        drm_global_item_unref(&vbox->ttm.mem_global_ref);
        vbox->ttm.mem_global_ref.release = NULL;
}

static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
        struct vbox_bo *bo;

        bo = container_of(tbo, struct vbox_bo, bo);

        drm_gem_object_release(&bo->gem);
        kfree(bo);
}

static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &vbox_bo_ttm_destroy)
                return true;
        return false;
}

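/**
 * Describes the memory types TTM may place vbox buffers in: cacheable
 * system memory, and fixed, write-combined VRAM handled by TTM's range
 * manager.
 */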
static int
vbox_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                      struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->func = &ttm_bo_manager_func;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

static void
vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct vbox_bo *vboxbo = vbox_bo(bo);

        if (!vbox_ttm_bo_is_vbox_bo(bo))
                return;

        vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM);
        *pl = vboxbo->placement;
}

static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
                                 struct file *filp)
{
        return 0;
}

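/**
 * Tells TTM how the CPU can reach a memory region: system memory needs no
 * aperture set-up, while VRAM is accessed through the first PCI BAR of the
 * virtual graphics device.
 */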
static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                                   struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct vbox_private *vbox = vbox_bdev(bdev);

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
                                 struct ttm_mem_reg *mem)
{
}

static int vbox_bo_move(struct ttm_buffer_object *bo,
                        bool evict, bool interruptible,
                        bool no_wait_gpu,
                        struct ttm_mem_reg *new_mem)
{
        int r;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
        r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
        r = ttm_bo_move_memcpy(bo, evict, interruptible, no_wait_gpu, new_mem);
#else
        r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
#endif
        return r;
}

static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}

static struct ttm_backend_func vbox_tt_backend_func = {
        .destroy = &vbox_ttm_backend_destroy,
};

static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
                                         unsigned long size,
                                         uint32_t page_flags,
                                         struct page *dummy_read_page)
{
        struct ttm_tt *tt;

        tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
        if (tt == NULL)
                return NULL;
        tt->func = &vbox_tt_backend_func;
        if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
                kfree(tt);
                return NULL;
        }
        return tt;
}

static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
{
        return ttm_pool_populate(ttm);
}

static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        ttm_pool_unpopulate(ttm);
}

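/**
 * TTM driver callbacks.  The eviction_valuable and io_mem_pfn entries point
 * at the stock TTM helpers that kernels 4.10+ and 4.11+ expect every driver
 * to provide (see the change description above).
 */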
struct ttm_bo_driver vbox_bo_driver = {
        .ttm_tt_create = vbox_ttm_tt_create,
        .ttm_tt_populate = vbox_ttm_tt_populate,
        .ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
        .init_mem_type = vbox_bo_init_mem_type,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
        .eviction_valuable = ttm_bo_eviction_valuable,
#endif
        .evict_flags = vbox_bo_evict_flags,
        .move = vbox_bo_move,
        .verify_access = vbox_bo_verify_access,
        .io_mem_reserve = &vbox_ttm_io_mem_reserve,
        .io_mem_free = &vbox_ttm_io_mem_free,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
        .io_mem_pfn = ttm_bo_default_io_mem_pfn,
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
        .lru_tail = &ttm_bo_default_lru_tail,
        .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
#endif
};

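/**
 * Sets up memory management for the device: TTM global state, the buffer
 * object device, a VRAM manager sized from the available VRAM reported by
 * the host, and a write-combining mapping of the framebuffer BAR.
 */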
int vbox_mm_init(struct vbox_private *vbox)
{
        int ret;
        struct drm_device *dev = vbox->dev;
        struct ttm_bo_device *bdev = &vbox->ttm.bdev;

        ret = vbox_ttm_global_init(vbox);
        if (ret)
                return ret;

        ret = ttm_bo_device_init(&vbox->ttm.bdev,
                                 vbox->ttm.bo_global_ref.ref.object,
                                 &vbox_bo_driver,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
                                 dev->anon_inode->i_mapping,
#endif
                                 DRM_FILE_PAGE_OFFSET,
                                 true);
        if (ret) {
                DRM_ERROR("Error initialising bo driver; %d\n", ret);
                return ret;
        }

        ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
                             vbox->available_vram_size >> PAGE_SHIFT);
        if (ret) {
                DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
                return ret;
        }

#ifdef DRM_MTRR_WC
        vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
                                     pci_resource_len(dev->pdev, 0),
                                     DRM_MTRR_WC);
#else
        vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
                                         pci_resource_len(dev->pdev, 0));
#endif

        vbox->ttm.mm_initialised = true;
        return 0;
}

void vbox_mm_fini(struct vbox_private *vbox)
{
#ifdef DRM_MTRR_WC
        struct drm_device *dev = vbox->dev;
#endif
        if (!vbox->ttm.mm_initialised)
                return;
        ttm_bo_device_release(&vbox->ttm.bdev);

        vbox_ttm_global_release(vbox);

#ifdef DRM_MTRR_WC
        drm_mtrr_del(vbox->fb_mtrr,
                     pci_resource_start(dev->pdev, 0),
                     pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
#else
        arch_phys_wc_del(vbox->fb_mtrr);
#endif
}

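/**
 * Builds the TTM placement list for the requested domains, falling back to
 * system memory if no domain bit was set.
 */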
void vbox_ttm_placement(struct vbox_bo *bo, int domain)
{
        u32 c = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
        bo->placement.fpfn = 0;
        bo->placement.lpfn = 0;
#else
        unsigned i;
#endif

        bo->placement.placement = bo->placements;
        bo->placement.busy_placement = bo->placements;
        if (domain & TTM_PL_FLAG_VRAM)
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
        if (domain & TTM_PL_FLAG_SYSTEM)
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        if (!c)
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        bo->placement.num_placement = c;
        bo->placement.num_busy_placement = c;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)
        for (i = 0; i < c; ++i) {
                bo->placements[i].fpfn = 0;
                bo->placements[i].lpfn = 0;
        }
#endif
}

int vbox_bo_create(struct drm_device *dev, int size, int align,
                   uint32_t flags, struct vbox_bo **pvboxbo)
{
        struct vbox_private *vbox = dev->dev_private;
        struct vbox_bo *vboxbo;
        size_t acc_size;
        int ret;

        vboxbo = kzalloc(sizeof(struct vbox_bo), GFP_KERNEL);
        if (!vboxbo)
                return -ENOMEM;

        ret = drm_gem_object_init(dev, &vboxbo->gem, size);
        if (ret) {
                kfree(vboxbo);
                return ret;
        }

        vboxbo->bo.bdev = &vbox->ttm.bdev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
        vboxbo->bo.bdev->dev_mapping = dev->dev_mapping;
#endif

        vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

        acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
                                       sizeof(struct vbox_bo));

        ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
                          ttm_bo_type_device, &vboxbo->placement,
                          align >> PAGE_SHIFT, false, NULL, acc_size,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)
                          NULL,
#endif
                          NULL, vbox_bo_ttm_destroy);
        if (ret)
                return ret;

        *pvboxbo = vboxbo;
        return 0;
}

static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
{
        return bo->bo.offset;
}

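/**
 * Pins are reference counted: only the first pin (and, in vbox_bo_unpin(),
 * the last unpin) revalidates the buffer with TTM_PL_FLAG_NO_EVICT toggled.
 */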
int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
        int i, ret;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = vbox_bo_gpu_offset(bo);
                return 0;
        }

        vbox_ttm_placement(bo, pl_flag);
        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;
        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;

        bo->pin_count = 1;
        if (gpu_addr)
                *gpu_addr = vbox_bo_gpu_offset(bo);
        return 0;
}

int vbox_bo_unpin(struct vbox_bo *bo)
{
        int i, ret;

        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;

        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) &= ~TTM_PL_FLAG_NO_EVICT;
        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;

        return 0;
}

/* Move a vbox-owned buffer object to system memory if no one else has it
 * pinned. The caller must have pinned it previously, and this call will
 * release the caller's pin. */
int vbox_bo_push_sysram(struct vbox_bo *bo)
{
        int i, ret;

        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;

        if (bo->kmap.virtual)
                ttm_bo_kunmap(&bo->kmap);

        vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret) {
                DRM_ERROR("pushing to system memory failed\n");
                return ret;
        }
        return 0;
}

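/**
 * mmap entry point: offsets below DRM_FILE_PAGE_OFFSET are not TTM objects
 * and are rejected; everything else is handed to ttm_bo_mmap().
 */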
int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct vbox_private *vbox;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
                return -EINVAL;

        file_priv = filp->private_data;
        vbox = file_priv->minor->dev->dev_private;
        return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
}