VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/drm/vbox_ttm.c@71947

Last change on this file since 71947 was 71947, checked in by vboxsync, 7 years ago

Additions/linux/drm: update drm driver to work with EL7.5, standard kernel.
No bugref.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 12.9 KB
/* $Id: vbox_ttm.c 71947 2018-04-20 14:59:20Z vboxsync $ */
/** @file
 * VirtualBox Additions Linux kernel video driver
 */

/*
 * Copyright (C) 2013-2017 Oracle Corporation
 * This file is based on ast_ttm.c
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 * Authors: Dave Airlie <[email protected]>
 *          Michael Thayer <[email protected]>
 */
#include "vbox_drv.h"
#include <ttm/ttm_page_alloc.h>

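/*
 * Kernel 3.18 (and the RHEL 7.3 backport) turned TTM placement entries
 * from plain flag words into struct ttm_place, so the flags now live in
 * a .flags member.  PLACEMENT_FLAGS() papers over that difference.
 */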
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0) && !defined(RHEL_73)
#define PLACEMENT_FLAGS(placement) (placement)
#else
#define PLACEMENT_FLAGS(placement) ((placement).flags)
#endif

static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
{
        return container_of(bd, struct vbox_private, ttm.bdev);
}

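/*
 * TTM keeps a single, reference-counted memory accounting object and
 * buffer object tracker shared by all drivers; drm_global_item_ref()
 * hands out references to them, creating the objects on first use.
 */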
static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

/**
 * Adds the vbox memory manager object/structures to the global memory manager.
 */
static int vbox_ttm_global_init(struct vbox_private *vbox)
{
        struct drm_global_reference *global_ref;
        int r;

        global_ref = &vbox->ttm.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &vbox_ttm_mem_global_init;
        global_ref->release = &vbox_ttm_mem_global_release;
        r = drm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
                return r;
        }

        vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
        global_ref = &vbox->ttm.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;

        r = drm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM BO subsystem.\n");
                drm_global_item_unref(&vbox->ttm.mem_global_ref);
                return r;
        }

        return 0;
}

/**
 * Removes the vbox memory manager object from the global memory manager.
 */
static void vbox_ttm_global_release(struct vbox_private *vbox)
{
        if (!vbox->ttm.mem_global_ref.release)
                return;

        drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
        drm_global_item_unref(&vbox->ttm.mem_global_ref);
        vbox->ttm.mem_global_ref.release = NULL;
}

static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
        struct vbox_bo *bo;

        bo = container_of(tbo, struct vbox_bo, bo);

        drm_gem_object_release(&bo->gem);
        kfree(bo);
}

static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &vbox_bo_ttm_destroy)
                return true;

        return false;
}

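/*
 * Describe the memory pools TTM may place buffers in: ordinary system
 * memory, and the VRAM aperture behind the device's first PCI BAR,
 * which is carved up by the generic range allocator in
 * ttm_bo_manager_func.
 */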
static int
vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
                      struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->func = &ttm_bo_manager_func;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
                return -EINVAL;
        }

        return 0;
}

static void
vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct vbox_bo *vboxbo = vbox_bo(bo);

        if (!vbox_ttm_bo_is_vbox_bo(bo))
                return;

        vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM);
        *pl = vboxbo->placement;
}

static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
                                 struct file *filp)
{
        return 0;
}

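/*
 * Tell TTM where a memory region lives for CPU mapping purposes: system
 * memory needs no setup, while VRAM mappings point into the device's
 * first PCI BAR.
 */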
static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                                   struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct vbox_private *vbox = vbox_bdev(bdev);

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
                                 struct ttm_mem_reg *mem)
{
}

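/*
 * ttm_bo_move_memcpy() changed its signature twice around 4.8/4.9,
 * hence the version ladder below; buffer contents are always moved
 * with a plain memcpy, as there is no DMA engine to offload to.
 */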
static int vbox_bo_move(struct ttm_buffer_object *bo,
                        bool evict, bool interruptible,
                        bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        int r;

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) && !defined(RHEL_74)
        r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) && !defined(RHEL_74)
        r = ttm_bo_move_memcpy(bo, evict, interruptible, no_wait_gpu, new_mem);
#else
        r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
#endif
        return r;
}

static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}

static struct ttm_backend_func vbox_tt_backend_func = {
        .destroy = &vbox_ttm_backend_destroy,
};

static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
                                         unsigned long size,
                                         u32 page_flags,
                                         struct page *dummy_read_page)
{
        struct ttm_tt *tt;

        tt = kzalloc(sizeof(*tt), GFP_KERNEL);
        if (!tt)
                return NULL;

        tt->func = &vbox_tt_backend_func;
        if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
                kfree(tt);
                return NULL;
        }

        return tt;
}

static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
{
        return ttm_pool_populate(ttm);
}

static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        ttm_pool_unpopulate(ttm);
}

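/*
 * The driver vtable TTM calls back into.  Entries guarded by version
 * checks only exist (or are only needed) on the matching kernels.
 */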
struct ttm_bo_driver vbox_bo_driver = {
        .ttm_tt_create = vbox_ttm_tt_create,
        .ttm_tt_populate = vbox_ttm_tt_populate,
        .ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
        .init_mem_type = vbox_bo_init_mem_type,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) || defined(RHEL_74)
        .eviction_valuable = ttm_bo_eviction_valuable,
#endif
        .evict_flags = vbox_bo_evict_flags,
        .move = vbox_bo_move,
        .verify_access = vbox_bo_verify_access,
        .io_mem_reserve = &vbox_ttm_io_mem_reserve,
        .io_mem_free = &vbox_ttm_io_mem_free,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) || defined(RHEL_75)
        .io_mem_pfn = ttm_bo_default_io_mem_pfn,
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) \
    || defined(RHEL_74)
# ifndef RHEL_75
        .lru_tail = &ttm_bo_default_lru_tail,
        .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
# endif
#endif
};

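/*
 * Bring up TTM for this device: take the global references, initialise
 * the buffer object device, size the VRAM pool from what the host
 * reports, and mark the framebuffer BAR write-combining for speed.
 */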
int vbox_mm_init(struct vbox_private *vbox)
{
        int ret;
        struct drm_device *dev = vbox->dev;
        struct ttm_bo_device *bdev = &vbox->ttm.bdev;

        ret = vbox_ttm_global_init(vbox);
        if (ret)
                return ret;

        ret = ttm_bo_device_init(&vbox->ttm.bdev,
                                 vbox->ttm.bo_global_ref.ref.object,
                                 &vbox_bo_driver,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) || defined(RHEL_73)
                                 dev->anon_inode->i_mapping,
#endif
                                 DRM_FILE_PAGE_OFFSET, true);
        if (ret) {
                DRM_ERROR("Error initialising bo driver: %d\n", ret);
                return ret;
        }

        ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
                             vbox->available_vram_size >> PAGE_SHIFT);
        if (ret) {
                DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
                return ret;
        }
#ifdef DRM_MTRR_WC
        vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
                                     pci_resource_len(dev->pdev, 0),
                                     DRM_MTRR_WC);
#else
        vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
                                         pci_resource_len(dev->pdev, 0));
#endif

        vbox->ttm.mm_initialised = true;

        return 0;
}

void vbox_mm_fini(struct vbox_private *vbox)
{
#ifdef DRM_MTRR_WC
        struct drm_device *dev = vbox->dev;
#endif
        if (!vbox->ttm.mm_initialised)
                return;
        ttm_bo_device_release(&vbox->ttm.bdev);

        vbox_ttm_global_release(vbox);

#ifdef DRM_MTRR_WC
        drm_mtrr_del(vbox->fb_mtrr,
                     pci_resource_start(dev->pdev, 0),
                     pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
#else
        arch_phys_wc_del(vbox->fb_mtrr);
#endif
}

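/*
 * Rebuild bo->placements for the requested domains.  VRAM is preferred
 * write-combined, system memory accepts any caching, and an empty
 * domain mask falls back to system memory so the list is never empty.
 */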
void vbox_ttm_placement(struct vbox_bo *bo, int domain)
{
        u32 c = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0) && !defined(RHEL_73)
        bo->placement.fpfn = 0;
        bo->placement.lpfn = 0;
#else
        unsigned int i;
#endif

        bo->placement.placement = bo->placements;
        bo->placement.busy_placement = bo->placements;

        if (domain & TTM_PL_FLAG_VRAM)
                PLACEMENT_FLAGS(bo->placements[c++]) =
                    TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
        if (domain & TTM_PL_FLAG_SYSTEM)
                PLACEMENT_FLAGS(bo->placements[c++]) =
                    TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        if (!c)
                PLACEMENT_FLAGS(bo->placements[c++]) =
                    TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

        bo->placement.num_placement = c;
        bo->placement.num_busy_placement = c;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) || defined(RHEL_73)
        for (i = 0; i < c; ++i) {
                bo->placements[i].fpfn = 0;
                bo->placements[i].lpfn = 0;
        }
#endif
}

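/*
 * Allocate and initialise a vbox buffer object.  On ttm_bo_init()
 * failure the object has already been torn down through the
 * vbox_bo_ttm_destroy callback, so it must not be freed again here.
 */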
int vbox_bo_create(struct drm_device *dev, int size, int align,
                   u32 flags, struct vbox_bo **pvboxbo)
{
        struct vbox_private *vbox = dev->dev_private;
        struct vbox_bo *vboxbo;
        size_t acc_size;
        int ret;

        vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
        if (!vboxbo)
                return -ENOMEM;

        ret = drm_gem_object_init(dev, &vboxbo->gem, size);
        if (ret) {
                kfree(vboxbo);
                return ret;
        }

        vboxbo->bo.bdev = &vbox->ttm.bdev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) && !defined(RHEL_73)
        vboxbo->bo.bdev->dev_mapping = dev->dev_mapping;
#endif

        vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

        acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
                                       sizeof(struct vbox_bo));

        ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
                          ttm_bo_type_device, &vboxbo->placement,
                          align >> PAGE_SHIFT, false, NULL, acc_size,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) || defined(RHEL_73)
                          NULL,
#endif
                          NULL, vbox_bo_ttm_destroy);
        if (ret)
                return ret;

        *pvboxbo = vboxbo;

        return 0;
}

static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
{
        return bo->bo.offset;
}

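/*
 * Pinning is reference counted: only the first pin actually validates
 * the buffer into the requested placement and sets TTM_PL_FLAG_NO_EVICT,
 * and only the last unpin clears that flag again.
 */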
int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
        int i, ret;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = vbox_bo_gpu_offset(bo);

                return 0;
        }

        vbox_ttm_placement(bo, pl_flag);

        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;

        bo->pin_count = 1;

        if (gpu_addr)
                *gpu_addr = vbox_bo_gpu_offset(bo);

        return 0;
}

int vbox_bo_unpin(struct vbox_bo *bo)
{
        int i, ret;

        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;

        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) &= ~TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;

        return 0;
}

/*
 * Move a vbox-owned buffer object to system memory if no one else has it
 * pinned.  The caller must have pinned it previously, and this call will
 * release the caller's pin.
 */
int vbox_bo_push_sysram(struct vbox_bo *bo)
{
        int i, ret;

        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;

        if (bo->kmap.virtual)
                ttm_bo_kunmap(&bo->kmap);

        vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);

        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret) {
                DRM_ERROR("Pushing to system memory failed\n");
                return ret;
        }

        return 0;
}

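/*
 * Offsets below DRM_FILE_PAGE_OFFSET belong to legacy DRM maps, not to
 * TTM buffer objects, so reject them before handing off to ttm_bo_mmap().
 */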
int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct vbox_private *vbox;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
                return -EINVAL;

        file_priv = filp->private_data;
        vbox = file_priv->minor->dev->dev_private;

        return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
}