VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/drm/vbox_ttm.c@ 87092

Last change on this file since 87092 was 87092, checked in by vboxsync, 4 years ago

Additions/linux/drm: Adjustment for Linux 5.10.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.8 KB
Line 
1/* $Id: vbox_ttm.c 87092 2020-12-15 22:29:56Z vboxsync $ */
2/** @file
3 * VirtualBox Additions Linux kernel video driver
4 */
5
6/*
7 * Copyright (C) 2013-2020 Oracle Corporation
8 * This file is based on ast_ttm.c
9 * Copyright 2012 Red Hat Inc.
10 *
11 * Permission is hereby granted, free of charge, to any person obtaining a
12 * copy of this software and associated documentation files (the
13 * "Software"), to deal in the Software without restriction, including
14 * without limitation the rights to use, copy, modify, merge, publish,
15 * distribute, sub license, and/or sell copies of the Software, and to
16 * permit persons to whom the Software is furnished to do so, subject to
17 * the following conditions:
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * The above copyright notice and this permission notice (including the
28 * next paragraph) shall be included in all copies or substantial portions
29 * of the Software.
30 *
31 *
32 * Authors: Dave Airlie <[email protected]>
33 * Michael Thayer <[email protected]>
34 */
35#include "vbox_drv.h"
36#include <drm/ttm/ttm_page_alloc.h>
37
/*
 * Before Linux 3.18 (and RHEL < 7.2) a TTM placement entry was a bare flags
 * word; newer kernels wrap the flags in a struct.  This macro gives uniform
 * lvalue access to the flags in both cases.
 */
#if RTLNX_VER_MAX(3,18,0) && !RTLNX_RHEL_MAJ_PREREQ(7,2)
#define PLACEMENT_FLAGS(placement) (placement)
#else
#define PLACEMENT_FLAGS(placement) ((placement).flags)
#endif
43
44
45static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
46{
47 return container_of(bd, struct vbox_private, ttm.bdev);
48}
49
50#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
/* Adapter: drm_global_reference init hook -> TTM memory-global setup. */
static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

/* Counterpart of vbox_ttm_mem_global_init(). */
static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}
60
/**
 * Adds the vbox memory manager object/structures to the global memory manager.
 * Only needed on kernels before 5.0 (and the matching RHEL backports), where
 * drivers hold explicit references on the TTM memory and BO globals.
 */
static int vbox_ttm_global_init(struct vbox_private *vbox)
{
	struct drm_global_reference *global_ref;
	int ret;

#if RTLNX_VER_MAX(5,0,0)
	/* Take a reference on the TTM memory-accounting global. */
	global_ref = &vbox->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &vbox_ttm_mem_global_init;
	global_ref->release = &vbox_ttm_mem_global_release;
	ret = drm_global_item_ref(global_ref);
	if (ret) {
		DRM_ERROR("Failed setting up TTM memory subsystem.\n");
		return ret;
	}

	vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
#endif
	/* Take a reference on the shared TTM buffer-object global. */
	global_ref = &vbox->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (ret) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
#if RTLNX_VER_MAX(5,0,0)
		/* Undo the memory-global reference taken above. */
		drm_global_item_unref(&vbox->ttm.mem_global_ref);
#endif
		return ret;
	}

	return 0;
}
100
/**
 * Removes the vbox memory manager object from the global memory manager.
 * Drops the references taken by vbox_ttm_global_init(), in reverse order.
 */
static void vbox_ttm_global_release(struct vbox_private *vbox)
{
	drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
	drm_global_item_unref(&vbox->ttm.mem_global_ref);
}
109#endif
110
111static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
112{
113 struct vbox_bo *bo;
114
115 bo = container_of(tbo, struct vbox_bo, bo);
116
117 drm_gem_object_release(&bo->gem);
118 kfree(bo);
119}
120
121static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
122{
123 if (bo->destroy == &vbox_bo_ttm_destroy)
124 return true;
125
126 return false;
127}
128
#if RTLNX_VER_MAX(5,10,0)
/*
 * Describe the memory domains this driver supports to TTM.  This callback
 * was removed from the driver interface in Linux 5.10.
 */
static int
vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
		      struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* Plain system RAM; any caching attribute is acceptable. */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* Fixed PCI BAR aperture; prefer write-combined mappings. */
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
		return -EINVAL;
	}

	return 0;
}
#endif
154
155static void
156vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
157{
158 struct vbox_bo *vboxbo = vbox_bo(bo);
159
160 if (!vbox_ttm_bo_is_vbox_bo(bo))
161 return;
162
163 vbox_ttm_placement(vboxbo, VBOX_MEM_TYPE_SYSTEM);
164 *pl = vboxbo->placement;
165}
166
/* mmap access check: every opener may map any of our buffer objects. */
static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
				 struct file *filp)
{
	return 0;
}
172
173#if RTLNX_VER_MAX(5,10,0)
174static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
175 struct ttm_mem_reg *mem)
176{
177 struct vbox_private *vbox = vbox_bdev(bdev);
178 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
179
180 mem->bus.addr = NULL;
181 mem->bus.offset = 0;
182 mem->bus.size = mem->num_pages << PAGE_SHIFT;
183 mem->bus.base = 0;
184 mem->bus.is_iomem = false;
185 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
186 return -EINVAL;
187 switch (mem->mem_type) {
188 case TTM_PL_SYSTEM:
189 /* system memory */
190 return 0;
191 case TTM_PL_VRAM:
192 mem->bus.offset = mem->start << PAGE_SHIFT;
193 mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
194 mem->bus.is_iomem = true;
195 break;
196 default:
197 return -EINVAL;
198 }
199 return 0;
200}
201#else
202static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
203 struct ttm_resource *mem)
204{
205 struct vbox_private *vbox = vbox_bdev(bdev);
206 mem->bus.addr = NULL;
207 mem->bus.offset = 0;
208 mem->size = mem->num_pages << PAGE_SHIFT;
209 mem->start = 0;
210 mem->bus.is_iomem = false;
211 switch (mem->mem_type) {
212 case TTM_PL_SYSTEM:
213 /* system memory */
214 return 0;
215 case TTM_PL_VRAM:
216 mem->bus.offset = mem->start << PAGE_SHIFT;
217 mem->start = pci_resource_start(vbox->dev->pdev, 0);
218 mem->bus.is_iomem = true;
219 break;
220 default:
221 return -EINVAL;
222 }
223 return 0;
224}
225#endif
226
227
228
#if RTLNX_VER_MIN(5,10,0)
/* Nothing to undo after vbox_ttm_io_mem_reserve(); required no-op hook. */
static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
				 struct ttm_resource *mem)
{
}
#else
/* Nothing to undo after vbox_ttm_io_mem_reserve(); required no-op hook. */
static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
				 struct ttm_mem_reg *mem)
{
}
#endif
240
#if RTLNX_VER_MIN(5,10,0)
/* From 5.10 TTM asks the driver directly to destroy a ttm_tt. */
static void vbox_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}
#else
/* Pre-5.10: destruction goes through a backend-function vtable instead. */
static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static struct ttm_backend_func vbox_tt_backend_func = {
	.destroy = &vbox_ttm_backend_destroy,
};
#endif
258
/*
 * Allocate and initialise the TTM page-table object for a buffer object.
 * The callback signature changed in Linux 4.17 (and the listed enterprise
 * backports).  Returns NULL on allocation or initialisation failure.
 */
#if RTLNX_VER_MAX(4,17,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
					 unsigned long size,
					 u32 page_flags,
					 struct page *dummy_read_page)
#else
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_buffer_object *bo,
					 u32 page_flags)
#endif
{
	struct ttm_tt *tt;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

#if RTLNX_VER_MAX(5,10,0)
	/* Pre-5.10 kernels reach the destroy hook via this backend vtable. */
	tt->func = &vbox_tt_backend_func;
#endif
#if RTLNX_VER_MAX(4,17,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
#else
	if (ttm_tt_init(tt, bo, page_flags)) {
#endif
		kfree(tt);
		return NULL;
	}

	return tt;
}
289
#if RTLNX_VER_MAX(4,17,0)
/* Back/unback the ttm_tt with pages from the common TTM page pool.
 * These hooks became optional (with pool defaults) in Linux 4.17. */
# if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
{
	return ttm_pool_populate(ttm);
}
# else
static int vbox_ttm_tt_populate(struct ttm_tt *ttm,
				struct ttm_operation_ctx *ctx)
{
	return ttm_pool_populate(ttm, ctx);
}
# endif

static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}
#endif
309
/* TTM driver callback table; the member set varies with kernel version. */
static struct ttm_bo_driver vbox_bo_driver = {
	.ttm_tt_create = vbox_ttm_tt_create,
#if RTLNX_VER_MIN(5,10,0)
	.ttm_tt_destroy = vbox_ttm_tt_destroy,
#endif
#if RTLNX_VER_MAX(4,17,0)
	.ttm_tt_populate = vbox_ttm_tt_populate,
	.ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
#endif
#if RTLNX_VER_MAX(5,10,0)
	.init_mem_type = vbox_bo_init_mem_type,
#endif
#if RTLNX_VER_MIN(4,10,0) || RTLNX_RHEL_MAJ_PREREQ(7,4)
	.eviction_valuable = ttm_bo_eviction_valuable,
#endif
	.evict_flags = vbox_bo_evict_flags,
	.verify_access = vbox_bo_verify_access,
	.io_mem_reserve = &vbox_ttm_io_mem_reserve,
	.io_mem_free = &vbox_ttm_io_mem_free,
#if RTLNX_VER_MIN(4,12,0) || RTLNX_RHEL_MAJ_PREREQ(7,5)
# if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
	.io_mem_pfn = ttm_bo_default_io_mem_pfn,
# endif
#endif
#if (RTLNX_VER_RANGE(4,7,0, 4,11,0) || RTLNX_RHEL_MAJ_PREREQ(7,4)) && !RTLNX_RHEL_MAJ_PREREQ(7,5)
	.lru_tail = &ttm_bo_default_lru_tail,
	.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
#endif
};
339
/**
 * Initialise TTM memory management for the device: set up the BO device,
 * create the VRAM range manager and enable write-combining on the
 * framebuffer PCI BAR.  Undone by vbox_mm_fini().
 *
 * Returns 0 on success or a negative error code.
 */
int vbox_mm_init(struct vbox_private *vbox)
{
	int ret;
	struct drm_device *dev = vbox->dev;
	struct ttm_bo_device *bdev = &vbox->ttm.bdev;

#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
	/* Older kernels need explicit references on TTM global state. */
	ret = vbox_ttm_global_init(vbox);
	if (ret)
		return ret;
#endif
	ret = ttm_bo_device_init(&vbox->ttm.bdev,
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
				 vbox->ttm.bo_global_ref.ref.object,
#endif
				 &vbox_bo_driver,
#if RTLNX_VER_MIN(3,15,0) || RTLNX_RHEL_MAJ_PREREQ(7,1)
				 dev->anon_inode->i_mapping,
#endif
#if RTLNX_VER_MIN(5,5,0) || RTLNX_RHEL_MIN(8,3)
				 dev->vma_offset_manager,
#elif RTLNX_VER_MAX(5,2,0) && !RTLNX_RHEL_MAJ_PREREQ(8,2)
				 DRM_FILE_PAGE_OFFSET,
#endif
				 true);
	if (ret) {
		DRM_ERROR("Error initialising bo driver; %d\n", ret);
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
		goto err_ttm_global_release;
#else
		return ret;
#endif
	}

	/* Hand the VRAM aperture to a TTM range manager. */
#if RTLNX_VER_MIN(5,10,0)
	ret = ttm_range_man_init(bdev, TTM_PL_VRAM, false,
				 vbox->available_vram_size >> PAGE_SHIFT);
#else
	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     vbox->available_vram_size >> PAGE_SHIFT);
#endif
	if (ret) {
		DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
		goto err_device_release;
	}

	/* Map the framebuffer BAR write-combined for performance. */
#ifdef DRM_MTRR_WC
	vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
				     pci_resource_len(dev->pdev, 0),
				     DRM_MTRR_WC);
#else
	vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
					 pci_resource_len(dev->pdev, 0));
#endif
	return 0;

err_device_release:
	ttm_bo_device_release(&vbox->ttm.bdev);
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
err_ttm_global_release:
	vbox_ttm_global_release(vbox);
#endif
	return ret;
}
404
/** Tear down everything set up by vbox_mm_init(), in reverse order. */
void vbox_mm_fini(struct vbox_private *vbox)
{
#ifdef DRM_MTRR_WC
	drm_mtrr_del(vbox->fb_mtrr,
		     pci_resource_start(vbox->dev->pdev, 0),
		     pci_resource_len(vbox->dev->pdev, 0), DRM_MTRR_WC);
#else
	arch_phys_wc_del(vbox->fb_mtrr);
#endif
	ttm_bo_device_release(&vbox->ttm.bdev);
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
	vbox_ttm_global_release(vbox);
#endif
}
419
/**
 * Fill in bo->placement / bo->placements for the domains requested in
 * @mem_type (a mask of VBOX_MEM_TYPE_VRAM and VBOX_MEM_TYPE_SYSTEM).
 * Falls back to system memory when no domain was requested.
 */
void vbox_ttm_placement(struct vbox_bo *bo, u32 mem_type)
{
	u32 c = 0;
#if RTLNX_VER_MAX(3,18,0) && !RTLNX_RHEL_MAJ_PREREQ(7,2)
	/* Old kernels keep the page-frame range on the placement itself. */
	bo->placement.fpfn = 0;
	bo->placement.lpfn = 0;
#else
	unsigned int i;
#endif

	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;

	if (mem_type & VBOX_MEM_TYPE_VRAM) {
#if RTLNX_VER_MIN(5,10,0)
		/* 5.10+: memory type and flags are separate fields. */
		bo->placements[c].mem_type = TTM_PL_VRAM;
		PLACEMENT_FLAGS(bo->placements[c++]) =
			TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
#else
		PLACEMENT_FLAGS(bo->placements[c++]) =
			TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
#endif
	}
	if (mem_type & VBOX_MEM_TYPE_SYSTEM) {
#if RTLNX_VER_MIN(5,10,0)
		bo->placements[c].mem_type = TTM_PL_SYSTEM;
		PLACEMENT_FLAGS(bo->placements[c++]) =
			TTM_PL_MASK_CACHING;
#else
		PLACEMENT_FLAGS(bo->placements[c++]) =
			TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
#endif
	}
	if (!c) {
		/* No domain requested: default to system memory. */
#if RTLNX_VER_MIN(5,10,0)
		bo->placements[c].mem_type = TTM_PL_SYSTEM;
		PLACEMENT_FLAGS(bo->placements[c++]) =
			TTM_PL_MASK_CACHING;
#else
		PLACEMENT_FLAGS(bo->placements[c++]) =
			TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
#endif
	}

	bo->placement.num_placement = c;
	bo->placement.num_busy_placement = c;

#if RTLNX_VER_MIN(3,18,0) || RTLNX_RHEL_MAJ_PREREQ(7,2)
	/* Newer kernels: no page-frame restriction on any placement. */
	for (i = 0; i < c; ++i) {
		bo->placements[i].fpfn = 0;
		bo->placements[i].lpfn = 0;
	}
#endif
}
474
475int vbox_bo_create(struct drm_device *dev, int size, int align,
476 u32 flags, struct vbox_bo **pvboxbo)
477{
478 struct vbox_private *vbox = dev->dev_private;
479 struct vbox_bo *vboxbo;
480 size_t acc_size;
481 int ret;
482
483 vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
484 if (!vboxbo)
485 return -ENOMEM;
486
487 ret = drm_gem_object_init(dev, &vboxbo->gem, size);
488 if (ret)
489 goto err_free_vboxbo;
490
491 vboxbo->bo.bdev = &vbox->ttm.bdev;
492#if RTLNX_VER_MAX(3,15,0) && !RTLNX_RHEL_MAJ_PREREQ(7,1)
493 vboxbo->bo.bdev->dev_mapping = dev->dev_mapping;
494#endif
495
496 vbox_ttm_placement(vboxbo, VBOX_MEM_TYPE_VRAM | VBOX_MEM_TYPE_SYSTEM);
497
498 acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
499 sizeof(struct vbox_bo));
500
501 ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
502 ttm_bo_type_device, &vboxbo->placement,
503#if RTLNX_VER_MAX(4,17,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
504 align >> PAGE_SHIFT, false, NULL, acc_size,
505#else
506 align >> PAGE_SHIFT, false, acc_size,
507#endif
508#if RTLNX_VER_MIN(3,18,0) || RTLNX_RHEL_MAJ_PREREQ(7,2)
509 NULL, NULL, vbox_bo_ttm_destroy);
510#else
511 NULL, vbox_bo_ttm_destroy);
512#endif
513 if (ret)
514 goto err_free_vboxbo;
515
516 *pvboxbo = vboxbo;
517
518 return 0;
519
520err_free_vboxbo:
521 kfree(vboxbo);
522 return ret;
523}
524
/** GPU-visible offset of the BO within its memory domain. */
static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
{
#if RTLNX_VER_MIN(5,9,0)
	/* ttm_buffer_object.offset is gone in 5.9+; derive from mem.start. */
	return bo->bo.mem.start << PAGE_SHIFT;
#else
	return bo->bo.offset;
#endif
}
533
/**
 * Pin a buffer object into @mem_type so it cannot be evicted.  Pins nest:
 * only the first pin validates the BO into place.  On success, optionally
 * returns the GPU offset via @gpu_addr.
 */
int vbox_bo_pin(struct vbox_bo *bo, u32 mem_type, u64 *gpu_addr)
{
#if RTLNX_VER_MIN(4,16,0) || RTLNX_RHEL_MAJ_PREREQ(7,6) || RTLNX_SUSE_MAJ_PREREQ(15,1) || RTLNX_SUSE_MAJ_PREREQ(12,5)
	struct ttm_operation_ctx ctx = { false, false };
#endif
	int i, ret;

	if (bo->pin_count) {
		/* Already resident: just take another reference. */
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = vbox_bo_gpu_offset(bo);

		return 0;
	}

	vbox_ttm_placement(bo, mem_type);

	/* Mark every placement non-evictable before validating. */
	for (i = 0; i < bo->placement.num_placement; i++)
		PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;

#if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
#else
	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
#endif
	if (ret)
		return ret;

	bo->pin_count = 1;

	if (gpu_addr)
		*gpu_addr = vbox_bo_gpu_offset(bo);

	return 0;
}
569
/**
 * Drop one pin reference on @bo; when the count reaches zero the BO is
 * re-validated as evictable.  An unbalanced unpin is logged but harmless.
 */
int vbox_bo_unpin(struct vbox_bo *bo)
{
#if RTLNX_VER_MIN(4,16,0) || RTLNX_RHEL_MAJ_PREREQ(7,6) || RTLNX_SUSE_MAJ_PREREQ(15,1) || RTLNX_SUSE_MAJ_PREREQ(12,5)
	struct ttm_operation_ctx ctx = { false, false };
#endif
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin bad %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	/* Last pin gone: allow eviction again. */
	for (i = 0; i < bo->placement.num_placement; i++)
		PLACEMENT_FLAGS(bo->placements[i]) &= ~TTM_PL_FLAG_NO_EVICT;

#if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
#else
	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
#endif
	if (ret)
		return ret;

	return 0;
}
598
599/*
600 * Move a vbox-owned buffer object to system memory if no one else has it
601 * pinned. The caller must have pinned it previously, and this call will
602 * release the caller's pin.
603 */
604int vbox_bo_push_sysram(struct vbox_bo *bo)
605{
606#if RTLNX_VER_MIN(4,16,0) || RTLNX_RHEL_MAJ_PREREQ(7,6) || RTLNX_SUSE_MAJ_PREREQ(15,1) || RTLNX_SUSE_MAJ_PREREQ(12,5)
607 struct ttm_operation_ctx ctx = { false, false };
608#endif
609 int i, ret;
610
611 if (!bo->pin_count) {
612 DRM_ERROR("unpin bad %p\n", bo);
613 return 0;
614 }
615 bo->pin_count--;
616 if (bo->pin_count)
617 return 0;
618
619 if (bo->kmap.virtual)
620 ttm_bo_kunmap(&bo->kmap);
621
622 vbox_ttm_placement(bo, VBOX_MEM_TYPE_SYSTEM);
623
624 for (i = 0; i < bo->placement.num_placement; i++)
625 PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;
626
627#if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
628 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
629#else
630 ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
631#endif
632 if (ret) {
633 DRM_ERROR("pushing to VRAM failed\n");
634 return ret;
635 }
636
637 return 0;
638}
639
640int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
641{
642 struct drm_file *file_priv;
643 struct vbox_private *vbox;
644
645 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
646 return -EINVAL;
647
648 file_priv = filp->private_data;
649 vbox = file_priv->minor->dev->dev_private;
650
651 return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
652}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette