VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/drm/vbox_ttm.c@ 108573

Last change on this file since 108573 was 108573, checked in by vboxsync, 5 weeks ago

Additions: Linux: vboxvideo: Attempt to fix cursor image corruption on .page_flip, bugref:9240.

Somehow the DRM stack gets confused on .page_flip when a BO has both
VRAM and system placements set at the same time. This change seems
to help and does not trigger noticeable regressions with old guests.
The change is rather experimental, though, and might be revisited if issues occur.
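The diff for this revision is not shown on this page. As a rough illustration of the idea only (a hypothetical caller sketch, not the actual change), the fix amounts to requesting a single placement when preparing a buffer for .page_flip:

    /* Hypothetical sketch: place the framebuffer BO in VRAM only, rather
     * than VBOX_MEM_TYPE_VRAM | VBOX_MEM_TYPE_SYSTEM, before pinning it
     * for the flip. */
    vbox_ttm_placement(bo, VBOX_MEM_TYPE_VRAM);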

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 23.8 KB
/* $Id: vbox_ttm.c 108573 2025-03-17 13:41:04Z vboxsync $ */
/** @file
 * VirtualBox Additions Linux kernel video driver
 */

/*
 * Copyright (C) 2013-2024 Oracle and/or its affiliates.
 * This file is based on ast_ttm.c
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 * Authors: Dave Airlie <[email protected]>
 *          Michael Thayer <[email protected]>
 */
#include "vbox_drv.h"

#if RTLNX_VER_MIN(6,3,0) || RTLNX_RHEL_RANGE(8,9, 8,99) || RTLNX_RHEL_MAJ_PREREQ(9,3)
# include <drm/ttm/ttm_tt.h>
#endif

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_MAJ_PREREQ(8,5)
# include <drm/drm_gem.h>
# include <drm/drm_gem_ttm_helper.h>
# include <drm/drm_gem_vram_helper.h>
#else
# include <drm/ttm/ttm_page_alloc.h>
#endif

#if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
# include <drm/ttm/ttm_range_manager.h>
#endif

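/*
 * Before Linux 3.18 (and RHEL < 7.2) a TTM placement was a plain flags word;
 * newer kernels wrap it in struct ttm_place with a .flags member.  This
 * macro papers over the difference for the placement setup code below.
 */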
#if RTLNX_VER_MAX(3,18,0) && !RTLNX_RHEL_MAJ_PREREQ(7,2)
#define PLACEMENT_FLAGS(placement) (placement)
#else
#define PLACEMENT_FLAGS(placement) ((placement).flags)
#endif

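/* Recover the driver instance from the TTM device embedded inside it. */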
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
static inline struct vbox_private *vbox_bdev(struct ttm_device *bd)
#else
static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
#endif
{
        return container_of(bd, struct vbox_private, ttm.bdev);
}

#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

/**
 * Adds the vbox memory manager object/structures to the global memory manager.
 */
static int vbox_ttm_global_init(struct vbox_private *vbox)
{
        struct drm_global_reference *global_ref;
        int ret;

#if RTLNX_VER_MAX(5,0,0)
        global_ref = &vbox->ttm.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &vbox_ttm_mem_global_init;
        global_ref->release = &vbox_ttm_mem_global_release;
        ret = drm_global_item_ref(global_ref);
        if (ret) {
                DRM_ERROR("Failed setting up TTM memory subsystem.\n");
                return ret;
        }

        vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
#endif
        global_ref = &vbox->ttm.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;

        ret = drm_global_item_ref(global_ref);
        if (ret) {
                DRM_ERROR("Failed setting up TTM BO subsystem.\n");
#if RTLNX_VER_MAX(5,0,0)
                drm_global_item_unref(&vbox->ttm.mem_global_ref);
#endif
                return ret;
        }

        return 0;
}

/**
 * Removes the vbox memory manager object from the global memory manager.
 */
static void vbox_ttm_global_release(struct vbox_private *vbox)
{
        drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
        drm_global_item_unref(&vbox->ttm.mem_global_ref);
}
#endif

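/*
 * Final TTM destroy callback: release the embedded GEM object and free the
 * wrapping vbox_bo.  Its address also serves as the identity check in
 * vbox_ttm_bo_is_vbox_bo() below.
 */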
static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
        struct vbox_bo *bo;

        bo = container_of(tbo, struct vbox_bo, bo);

        drm_gem_object_release(&bo->gem);
        kfree(bo);
}

static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &vbox_bo_ttm_destroy)
                return true;

        return false;
}

#if RTLNX_VER_MAX(5,10,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
static int
vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
                      struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->func = &ttm_bo_manager_func;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
                return -EINVAL;
        }

        return 0;
}
#endif

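/* When TTM needs to evict one of our BOs, always send it to system memory. */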
static void
vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct vbox_bo *vboxbo = vbox_bo(bo);

        if (!vbox_ttm_bo_is_vbox_bo(bo))
                return;

        vbox_ttm_placement(vboxbo, VBOX_MEM_TYPE_SYSTEM);
        *pl = vboxbo->placement;
}

#if RTLNX_VER_MAX(5,14,0) && !RTLNX_RHEL_RANGE(8,6, 8,99)
static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
                                 struct file *filp)
{
        return 0;
}
#endif

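/*
 * Describe to TTM how each memory type is mapped: plain system memory needs
 * no setup, while VRAM lives in PCI BAR 0 of the virtual graphics device
 * and must be mapped as I/O memory (write-combined on recent kernels).
 */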
#if RTLNX_VER_MAX(5,10,0) && !RTLNX_RHEL_RANGE(8,5, 8,99)
static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                                   struct ttm_mem_reg *mem)
{
        struct vbox_private *vbox = vbox_bdev(bdev);
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
#else
# if RTLNX_VER_MAX(5,13,0) && !RTLNX_RHEL_RANGE(8,6, 8,99)
static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                                   struct ttm_resource *mem)
# else /* > 5.13.0 */
static int vbox_ttm_io_mem_reserve(struct ttm_device *bdev,
                                   struct ttm_resource *mem)
# endif /* > 5.13.0 */
{
        struct vbox_private *vbox = vbox_bdev(bdev);
        mem->bus.addr = NULL;
        mem->bus.offset = 0;
# if RTLNX_VER_MAX(5,12,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        mem->size = mem->num_pages << PAGE_SHIFT;
# endif
        mem->start = 0;
        mem->bus.is_iomem = false;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
                return 0;
        case TTM_PL_VRAM:
# if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                mem->bus.caching = ttm_write_combined;
# endif
# if RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                mem->bus.offset = (mem->start << PAGE_SHIFT) + pci_resource_start(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0);
# else
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->start = pci_resource_start(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0);
# endif
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
#endif

#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
static void vbox_ttm_io_mem_free(struct ttm_device *bdev,
                                 struct ttm_resource *mem)
{
}
#elif RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
                                 struct ttm_resource *mem)
{
}
#else
static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
                                 struct ttm_mem_reg *mem)
{
}
#endif

#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
static void vbox_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}
#elif RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
static void vbox_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}
#else
static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}

static struct ttm_backend_func vbox_tt_backend_func = {
        .destroy = &vbox_ttm_backend_destroy,
};
#endif

#if RTLNX_VER_MAX(4,17,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
                                         unsigned long size,
                                         u32 page_flags,
                                         struct page *dummy_read_page)
#else
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_buffer_object *bo,
                                         u32 page_flags)
#endif
{
        struct ttm_tt *tt;

        tt = kzalloc(sizeof(*tt), GFP_KERNEL);
        if (!tt)
                return NULL;

#if RTLNX_VER_MAX(5,10,0) && !RTLNX_RHEL_RANGE(8,5, 8,99)
        tt->func = &vbox_tt_backend_func;
#endif
#if RTLNX_VER_MIN(5,19,0) || RTLNX_RHEL_RANGE(8,8, 8,99) || RTLNX_RHEL_RANGE(9,2, 9,99) || RTLNX_SUSE_MAJ_PREREQ(15,5)
        if (ttm_tt_init(tt, bo, page_flags, ttm_write_combined, 0)) {
#elif RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        if (ttm_tt_init(tt, bo, page_flags, ttm_write_combined)) {
#elif RTLNX_VER_MIN(4,17,0) || RTLNX_RHEL_MAJ_PREREQ(7,6) || RTLNX_SUSE_MAJ_PREREQ(15,1) || RTLNX_SUSE_MAJ_PREREQ(12,5)
        if (ttm_tt_init(tt, bo, page_flags)) {
#else
        if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
#endif

                kfree(tt);
                return NULL;
        }

        return tt;
}

#if RTLNX_VER_MAX(4,17,0)
# if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
{
        return ttm_pool_populate(ttm);
}
# else
static int vbox_ttm_tt_populate(struct ttm_tt *ttm,
                                struct ttm_operation_ctx *ctx)
{
        return ttm_pool_populate(ttm, ctx);
}
# endif

static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        ttm_pool_unpopulate(ttm);
}
#endif

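/*
 * Move callback for >= 5.11 kernels.  On >= 6.4 a BO can be gutted
 * (bo->resource == NULL); moving such a BO anywhere other than system
 * memory needs an intermediate hop through TTM_PL_SYSTEM, which is
 * requested with -EMULTIHOP, while a move to system memory is a no-op.
 */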
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
static int vbox_bo_move(struct ttm_buffer_object *bo, bool evict,
                        struct ttm_operation_ctx *ctx, struct ttm_resource *new_mem,
                        struct ttm_place *hop)
{
# if RTLNX_VER_MIN(6,4,0)
        if (!bo->resource)
        {
                if (new_mem->mem_type != TTM_PL_SYSTEM)
                {
                        hop->mem_type = TTM_PL_SYSTEM;
                        hop->flags = TTM_PL_FLAG_TEMPORARY;
                        return -EMULTIHOP;
                }
                ttm_bo_move_null(bo, new_mem);
                return 0;
        }
# endif
        return ttm_bo_move_memcpy(bo, ctx, new_mem);
}
#endif

#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
static struct ttm_device_funcs vbox_bo_driver = {
#else /* < 5.13.0 */
static struct ttm_bo_driver vbox_bo_driver = {
#endif /* < 5.13.0 */
        .ttm_tt_create = vbox_ttm_tt_create,
#if RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        .ttm_tt_destroy = vbox_ttm_tt_destroy,
#endif
#if RTLNX_VER_MAX(4,17,0)
        .ttm_tt_populate = vbox_ttm_tt_populate,
        .ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
#endif
#if RTLNX_VER_MAX(5,10,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        .init_mem_type = vbox_bo_init_mem_type,
#endif
#if RTLNX_VER_MIN(4,10,0) || RTLNX_RHEL_MAJ_PREREQ(7,4)
        .eviction_valuable = ttm_bo_eviction_valuable,
#endif
        .evict_flags = vbox_bo_evict_flags,
#if RTLNX_VER_MAX(5,14,0) && !RTLNX_RHEL_RANGE(8,6, 8,99)
        .verify_access = vbox_bo_verify_access,
#endif
        .io_mem_reserve = &vbox_ttm_io_mem_reserve,
        .io_mem_free = &vbox_ttm_io_mem_free,
#if RTLNX_VER_MIN(4,12,0) || RTLNX_RHEL_MAJ_PREREQ(7,5)
# if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
        .io_mem_pfn = ttm_bo_default_io_mem_pfn,
# endif
#endif
#if (RTLNX_VER_RANGE(4,7,0, 4,11,0) || RTLNX_RHEL_MAJ_PREREQ(7,4)) && !RTLNX_RHEL_MAJ_PREREQ(7,5)
        .lru_tail = &ttm_bo_default_lru_tail,
        .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
#endif
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        .move = &vbox_bo_move,
#endif
};

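/*
 * Bring up the TTM memory manager: initialise the TTM device (plus the
 * global TTM references on pre-5.0 kernels), register a range manager for
 * VRAM sized to what the host makes available, and request write-combining
 * for the VRAM PCI BAR.
 */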
int vbox_mm_init(struct vbox_private *vbox)
{
        int ret;
        struct drm_device *dev = vbox->dev;
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        struct ttm_device *bdev = &vbox->ttm.bdev;
#else
        struct ttm_bo_device *bdev = &vbox->ttm.bdev;
#endif

#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
        ret = vbox_ttm_global_init(vbox);
        if (ret)
                return ret;
#endif
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        ret = ttm_device_init(&vbox->ttm.bdev,
#else
        ret = ttm_bo_device_init(&vbox->ttm.bdev,
#endif
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
                                 vbox->ttm.bo_global_ref.ref.object,
#endif
                                 &vbox_bo_driver,
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                                 dev->dev,
#endif
#if RTLNX_VER_MIN(3,15,0) || RTLNX_RHEL_MAJ_PREREQ(7,1)
                                 dev->anon_inode->i_mapping,
#endif
#if RTLNX_VER_MIN(5,5,0) || RTLNX_RHEL_MIN(8,3) || RTLNX_SUSE_MAJ_PREREQ(15,3)
                                 dev->vma_offset_manager,
#elif RTLNX_VER_MAX(5,2,0) && !RTLNX_RHEL_MAJ_PREREQ(8,2)
                                 DRM_FILE_PAGE_OFFSET,
#endif
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                                 false,
#endif
                                 true);
        if (ret) {
                DRM_ERROR("Error initialising bo driver; %d\n", ret);
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
                goto err_ttm_global_release;
#else
                return ret;
#endif
        }

#if RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        ret = ttm_range_man_init(bdev, TTM_PL_VRAM, false,
                                 vbox->available_vram_size >> PAGE_SHIFT);
#else
        ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
                             vbox->available_vram_size >> PAGE_SHIFT);
#endif
        if (ret) {
                DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
                goto err_device_release;
        }

#ifdef DRM_MTRR_WC
        vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(VBOX_DRM_TO_PCI_DEV(dev), 0),
                                     pci_resource_len(VBOX_DRM_TO_PCI_DEV(dev), 0),
                                     DRM_MTRR_WC);
#else
        vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(VBOX_DRM_TO_PCI_DEV(dev), 0),
                                         pci_resource_len(VBOX_DRM_TO_PCI_DEV(dev), 0));
#endif
        return 0;

err_device_release:
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        ttm_device_fini(&vbox->ttm.bdev);
#else
        ttm_bo_device_release(&vbox->ttm.bdev);
#endif
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
err_ttm_global_release:
        vbox_ttm_global_release(vbox);
#endif
        return ret;
}

void vbox_mm_fini(struct vbox_private *vbox)
{
#ifdef DRM_MTRR_WC
        drm_mtrr_del(vbox->fb_mtrr,
                     pci_resource_start(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0),
                     pci_resource_len(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0), DRM_MTRR_WC);
#else
        arch_phys_wc_del(vbox->fb_mtrr);
#endif
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        ttm_device_fini(&vbox->ttm.bdev);
#else
        ttm_bo_device_release(&vbox->ttm.bdev);
#endif
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
        vbox_ttm_global_release(vbox);
#endif
}

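/*
 * Fill in the BO's placement list from a VBOX_MEM_TYPE_* mask, falling back
 * to system memory if the mask is empty.  Per the commit message above,
 * callers should avoid requesting VRAM and system placements at the same
 * time, since that appears to confuse the DRM stack on .page_flip.
 */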
void vbox_ttm_placement(struct vbox_bo *bo, u32 mem_type)
{
        u32 c = 0;
#if RTLNX_VER_MAX(3,18,0) && !RTLNX_RHEL_MAJ_PREREQ(7,2)
        bo->placement.fpfn = 0;
        bo->placement.lpfn = 0;
#else
        unsigned int i;
#endif

        bo->placement.placement = bo->placements;
#if RTLNX_VER_MAX(6,9,0) && !RTLNX_RHEL_MAJ_PREREQ(9,5)
        bo->placement.busy_placement = bo->placements;
#endif

        if (mem_type & VBOX_MEM_TYPE_VRAM) {
#if RTLNX_VER_MIN(6,9,0)
                bo->placements[c].mem_type = TTM_PL_VRAM;
                PLACEMENT_FLAGS(bo->placements[c++]) = TTM_PL_FLAG_DESIRED;
#elif RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                bo->placements[c].mem_type = TTM_PL_VRAM;
                PLACEMENT_FLAGS(bo->placements[c++]) = 0;
#elif RTLNX_VER_MIN(5,10,0)
                bo->placements[c].mem_type = TTM_PL_VRAM;
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
#else
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
#endif
        }
        if (mem_type & VBOX_MEM_TYPE_SYSTEM) {
#if RTLNX_VER_MIN(6,9,0)
                bo->placements[c].mem_type = TTM_PL_SYSTEM;
                PLACEMENT_FLAGS(bo->placements[c++]) = TTM_PL_FLAG_DESIRED;
#elif RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                bo->placements[c].mem_type = TTM_PL_SYSTEM;
                PLACEMENT_FLAGS(bo->placements[c++]) = 0;
#elif RTLNX_VER_MIN(5,10,0)
                bo->placements[c].mem_type = TTM_PL_SYSTEM;
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_MASK_CACHING;
#else
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
#endif
        }
        if (!c) {
#if RTLNX_VER_MIN(6,9,0)
                bo->placements[c].mem_type = TTM_PL_SYSTEM;
                PLACEMENT_FLAGS(bo->placements[c++]) = TTM_PL_FLAG_DESIRED;
#elif RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                bo->placements[c].mem_type = TTM_PL_SYSTEM;
                PLACEMENT_FLAGS(bo->placements[c++]) = 0;
#elif RTLNX_VER_MIN(5,10,0)
                bo->placements[c].mem_type = TTM_PL_SYSTEM;
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_MASK_CACHING;
#else
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
#endif
        }

        bo->placement.num_placement = c;
#if RTLNX_VER_MAX(6,9,0) && !RTLNX_RHEL_MAJ_PREREQ(9,5)
        bo->placement.num_busy_placement = c;
#endif

#if RTLNX_VER_MIN(3,18,0) || RTLNX_RHEL_MAJ_PREREQ(7,2)
        for (i = 0; i < c; ++i) {
                bo->placements[i].fpfn = 0;
                bo->placements[i].lpfn = 0;
        }
#endif
}

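/* GEM object vtable for >= 5.11 kernels, backed by the drm_gem_ttm helpers. */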
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
static const struct drm_gem_object_funcs vbox_drm_gem_object_funcs = {
        .free = vbox_gem_free_object,
        .print_info = drm_gem_ttm_print_info,
# if RTLNX_VER_MIN(6,5,0)
        .vmap = drm_gem_ttm_vmap,
        .vunmap = drm_gem_ttm_vunmap,
# endif
# if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        .mmap = drm_gem_ttm_mmap,
# endif
};
#endif

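/*
 * Allocate a vbox_bo: a GEM object plus a TTM buffer object, initially
 * placed in system memory.  A typical caller (illustrative sketch only,
 * error handling omitted) would do:
 *
 *      struct vbox_bo *bo;
 *      u64 gpu_addr;
 *
 *      if (vbox_bo_create(dev, size, PAGE_SIZE, 0, &bo) == 0)
 *              vbox_bo_pin(bo, VBOX_MEM_TYPE_VRAM, &gpu_addr);
 */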
int vbox_bo_create(struct drm_device *dev, int size, int align,
                   u32 flags, struct vbox_bo **pvboxbo)
{
        struct vbox_private *vbox = dev->dev_private;
        struct vbox_bo *vboxbo;
#if RTLNX_VER_MAX(5,13,0) && !RTLNX_RHEL_RANGE(8,6, 8,99)
        size_t acc_size;
#endif
        int ret;

        vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
        if (!vboxbo)
                return -ENOMEM;

        ret = drm_gem_object_init(dev, &vboxbo->gem, size);
        if (ret)
                goto err_free_vboxbo;

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        if (!vboxbo->gem.funcs) {
                vboxbo->gem.funcs = &vbox_drm_gem_object_funcs;
        }
#endif
        vboxbo->bo.bdev = &vbox->ttm.bdev;
#if RTLNX_VER_MAX(3,15,0) && !RTLNX_RHEL_MAJ_PREREQ(7,1)
        vboxbo->bo.bdev->dev_mapping = dev->dev_mapping;
#endif

        vbox_ttm_placement(vboxbo, VBOX_MEM_TYPE_SYSTEM);

#if RTLNX_VER_MAX(5,13,0) && !RTLNX_RHEL_RANGE(8,6, 8,99)
        acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
                                       sizeof(struct vbox_bo));
#endif

#if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        /* Initialization of the following was removed from DRM stack
         * in 5.14, so we need to do it manually. */
        vboxbo->bo.base.funcs = &vbox_drm_gem_object_funcs;
        kref_init(&vboxbo->bo.base.refcount);
        vboxbo->bo.base.size = size;
        vboxbo->bo.base.dev = dev;
        dma_resv_init(&vboxbo->bo.base._resv);
        drm_vma_node_reset(&vboxbo->bo.base.vma_node);
#endif

#if RTLNX_VER_MIN(6,1,0) || RTLNX_RHEL_RANGE(8,9, 8,99) || RTLNX_RHEL_RANGE(9,3, 9,99) || RTLNX_SUSE_MAJ_PREREQ(15,5)
        ret = ttm_bo_init_validate(&vbox->ttm.bdev, &vboxbo->bo,
#else
        ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
#endif /* < 6.1.0 */
                          ttm_bo_type_device, &vboxbo->placement,
#if RTLNX_VER_MAX(4,17,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
                          align >> PAGE_SHIFT, false, NULL, acc_size,
#elif RTLNX_VER_MAX(5,13,0) && !RTLNX_RHEL_RANGE(8,6, 8,99) /* < 5.13.0, < RHEL(8.6, 8.99) */
                          align >> PAGE_SHIFT, false, acc_size,
#else /* > 5.13.0 */
                          align >> PAGE_SHIFT, false,
#endif /* > 5.13.0 */
#if RTLNX_VER_MIN(3,18,0) || RTLNX_RHEL_MAJ_PREREQ(7,2)
                          NULL, NULL, vbox_bo_ttm_destroy);
#else
                          NULL, vbox_bo_ttm_destroy);
#endif
        if (ret)
        {
                /* In case of failure, ttm_bo_init() is supposed to call
                 * vbox_bo_ttm_destroy(), which in turn frees @vboxbo. */
                goto err_exit;
        }

        *pvboxbo = vboxbo;

        return 0;

err_free_vboxbo:
        kfree(vboxbo);
err_exit:
        return ret;
}

static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
{
#if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        return bo->bo.resource->start << PAGE_SHIFT;
#elif RTLNX_VER_MIN(5,9,0) || RTLNX_RHEL_MIN(8,4) || RTLNX_SUSE_MAJ_PREREQ(15,3)
        return bo->bo.mem.start << PAGE_SHIFT;
#else
        return bo->bo.offset;
#endif
}

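/*
 * Pin a BO into the given memory type and optionally return its GPU offset.
 * Pins are reference counted: only the first pin actually validates the BO
 * into place.  On >= 5.11 kernels the BO is additionally pinned through
 * ttm_bo_pin(); older kernels marked every placement TTM_PL_FLAG_NO_EVICT
 * instead.
 */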
int vbox_bo_pin(struct vbox_bo *bo, u32 mem_type, u64 *gpu_addr)
{
#if RTLNX_VER_MIN(4,16,0) || RTLNX_RHEL_MAJ_PREREQ(7,6) || RTLNX_SUSE_MAJ_PREREQ(15,1) || RTLNX_SUSE_MAJ_PREREQ(12,5)
        struct ttm_operation_ctx ctx = { false, false };
#endif
        int ret;
#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        int i;
#endif

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = vbox_bo_gpu_offset(bo);

                return 0;
        }

        vbox_ttm_placement(bo, mem_type);

#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;
#endif

#if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
#else
        ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
#endif
        if (ret)
                return ret;

        bo->pin_count = 1;

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        ttm_bo_pin(&bo->bo);
#endif

        if (gpu_addr)
                *gpu_addr = vbox_bo_gpu_offset(bo);

        return 0;
}

int vbox_bo_unpin(struct vbox_bo *bo)
{
#if RTLNX_VER_MIN(4,16,0) || RTLNX_RHEL_MAJ_PREREQ(7,6) || RTLNX_SUSE_MAJ_PREREQ(15,1) || RTLNX_SUSE_MAJ_PREREQ(12,5)
# if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        struct ttm_operation_ctx ctx = { false, false };
# endif
#endif
        int ret = 0;
#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        int i;
#endif

        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;

#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) &= ~TTM_PL_FLAG_NO_EVICT;
#endif

#if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
#elif RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
#endif
        if (ret)
                return ret;

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        ttm_bo_unpin(&bo->bo);
#endif

        return 0;
}

#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
/*
 * Move a vbox-owned buffer object to system memory if no one else has it
 * pinned.  The caller must have pinned it previously, and this call will
 * release the caller's pin.
 */
int vbox_bo_push_sysram(struct vbox_bo *bo)
{
# if RTLNX_VER_MIN(4,16,0) || RTLNX_RHEL_MAJ_PREREQ(7,6) || RTLNX_SUSE_MAJ_PREREQ(15,1) || RTLNX_SUSE_MAJ_PREREQ(12,5)
        struct ttm_operation_ctx ctx = { false, false };
# endif
        int i, ret;

        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;

        if (bo->kmap.virtual)
                ttm_bo_kunmap(&bo->kmap);

        vbox_ttm_placement(bo, VBOX_MEM_TYPE_SYSTEM);

        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;

# if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
# else
        ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
# endif
        if (ret) {
                DRM_ERROR("pushing to system memory failed\n");
                return ret;
        }

        return 0;
}
#endif

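/*
 * mmap entry point for the device node.  On >= 5.14 kernels mappings are
 * handled by the GEM/TTM helpers via drm_gem_mmap(); older kernels hand
 * the VMA to ttm_bo_mmap() directly.
 */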
int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct vbox_private *vbox;
        int ret = -EINVAL;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
                return -EINVAL;

        file_priv = filp->private_data;
        vbox = file_priv->minor->dev->dev_private;

#if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        (void)vbox;
        if (drm_dev_is_unplugged(file_priv->minor->dev))
                return -ENODEV;
        ret = drm_gem_mmap(filp, vma);
#else
        ret = ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
#endif
        return ret;
}
