- Timestamp: Jun 14, 2017 1:17:27 PM
- File: 1 edited
trunk/src/VBox/Additions/linux/drm/vbox_ttm.c
The change from r67191 to r67406 is a coding-style clean-up of the TTM code, with no functional change: indentation is normalised, function signatures that were split across lines are joined where they fit, "# define" becomes "#define", "uint32_t" becomes "u32", "ptr == NULL" tests become "!ptr", "sizeof(struct foo)" allocations become "sizeof(*ptr)", the DRM_ERROR message that was split across two string literals is merged onto one line, and blank lines and comment blocks are tidied. The listing below is the changed region as of r67406; the r67191 side differs only in those formatting details. Unchanged context collapsed by the changeset viewer is shown as "…".

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
#define PLACEMENT_FLAGS(placement) (placement)
#else
#define PLACEMENT_FLAGS(placement) ((placement).flags)
#endif

static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
{
        return container_of(bd, struct vbox_private, ttm.bdev);
}

static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

…
static int vbox_ttm_global_init(struct vbox_private *vbox)
{
        struct drm_global_reference *global_ref;
        int r;

        global_ref = &vbox->ttm.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &vbox_ttm_mem_global_init;
        global_ref->release = &vbox_ttm_mem_global_release;
        r = drm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
                return r;
        }

        vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
        global_ref = &vbox->ttm.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;

        r = drm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM BO subsystem.\n");
                drm_global_item_unref(&vbox->ttm.mem_global_ref);
                return r;
        }

        return 0;
}

…
 * Removes the vbox memory manager object from the global memory manager.
 */
static void vbox_ttm_global_release(struct vbox_private *vbox)
{
        if (!vbox->ttm.mem_global_ref.release)
                return;

        drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
        drm_global_item_unref(&vbox->ttm.mem_global_ref);
        vbox->ttm.mem_global_ref.release = NULL;
}

static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
        struct vbox_bo *bo;

        bo = container_of(tbo, struct vbox_bo, bo);

        drm_gem_object_release(&bo->gem);
        kfree(bo);
}

static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &vbox_bo_ttm_destroy)
                return true;

        return false;
}

static int
vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
                      struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->func = &ttm_bo_manager_func;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
                return -EINVAL;
        }

        return 0;
}

…
vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct vbox_bo *vboxbo = vbox_bo(bo);

        if (!vbox_ttm_bo_is_vbox_bo(bo))
                return;

        vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM);
        *pl = vboxbo->placement;
}

static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
                                 struct file *filp)
{
        return 0;
}

static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                                   struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct vbox_private *vbox = vbox_bdev(bdev);

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
                                 struct ttm_mem_reg *mem)
{
}

static int vbox_bo_move(struct ttm_buffer_object *bo,
                        bool evict, bool interruptible,
                        bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        int r;

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
        r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
        r = ttm_bo_move_memcpy(bo, evict, interruptible, no_wait_gpu, new_mem);
#else
        r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
#endif
        return r;
}

static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}

static struct ttm_backend_func vbox_tt_backend_func = {
        .destroy = &vbox_ttm_backend_destroy,
};

static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
                                         unsigned long size,
                                         u32 page_flags,
                                         struct page *dummy_read_page)
{
        struct ttm_tt *tt;

        tt = kzalloc(sizeof(*tt), GFP_KERNEL);
        if (!tt)
                return NULL;

        tt->func = &vbox_tt_backend_func;
        if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
                kfree(tt);
                return NULL;
        }

        return tt;
}

static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
{
        return ttm_pool_populate(ttm);
}

static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        ttm_pool_unpopulate(ttm);
}

struct ttm_bo_driver vbox_bo_driver = {
        .ttm_tt_create = vbox_ttm_tt_create,
        .ttm_tt_populate = vbox_ttm_tt_populate,
        .ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
        .init_mem_type = vbox_bo_init_mem_type,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
        .eviction_valuable = ttm_bo_eviction_valuable,
#endif
        .evict_flags = vbox_bo_evict_flags,
        .move = vbox_bo_move,
        .verify_access = vbox_bo_verify_access,
        .io_mem_reserve = &vbox_ttm_io_mem_reserve,
        .io_mem_free = &vbox_ttm_io_mem_free,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        .io_mem_pfn = ttm_bo_default_io_mem_pfn,
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
        .lru_tail = &ttm_bo_default_lru_tail,
        .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
#endif
};

…
int vbox_mm_init(struct vbox_private *vbox)
{
        int ret;
        struct drm_device *dev = vbox->dev;
        struct ttm_bo_device *bdev = &vbox->ttm.bdev;

        ret = vbox_ttm_global_init(vbox);
        if (ret)
                return ret;

        ret = ttm_bo_device_init(&vbox->ttm.bdev,
                                 vbox->ttm.bo_global_ref.ref.object,
                                 &vbox_bo_driver,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
                                 dev->anon_inode->i_mapping,
#endif
                                 DRM_FILE_PAGE_OFFSET, true);
        if (ret) {
                DRM_ERROR("Error initialising bo driver; %d\n", ret);
                return ret;
        }

        ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
                             vbox->available_vram_size >> PAGE_SHIFT);
        if (ret) {
                DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
                return ret;
        }
#ifdef DRM_MTRR_WC
        vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
                                     pci_resource_len(dev->pdev, 0),
                                     DRM_MTRR_WC);
#else
        vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
                                         pci_resource_len(dev->pdev, 0));
#endif

        vbox->ttm.mm_initialised = true;

        return 0;
}

…
{
#ifdef DRM_MTRR_WC
        struct drm_device *dev = vbox->dev;
#endif
        if (!vbox->ttm.mm_initialised)
                return;
        ttm_bo_device_release(&vbox->ttm.bdev);

        vbox_ttm_global_release(vbox);

#ifdef DRM_MTRR_WC
        drm_mtrr_del(vbox->fb_mtrr,
                     pci_resource_start(dev->pdev, 0),
                     pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
#else
        arch_phys_wc_del(vbox->fb_mtrr);
#endif
}

…
void vbox_ttm_placement(struct vbox_bo *bo, int domain)
{
        u32 c = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
        bo->placement.fpfn = 0;
        bo->placement.lpfn = 0;
#else
        unsigned int i;
#endif

        bo->placement.placement = bo->placements;
        bo->placement.busy_placement = bo->placements;

        if (domain & TTM_PL_FLAG_VRAM)
                PLACEMENT_FLAGS(bo->placements[c++]) =
                    TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
        if (domain & TTM_PL_FLAG_SYSTEM)
                PLACEMENT_FLAGS(bo->placements[c++]) =
                    TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        if (!c)
                PLACEMENT_FLAGS(bo->placements[c++]) =
                    TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

        bo->placement.num_placement = c;
        bo->placement.num_busy_placement = c;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)
        for (i = 0; i < c; ++i) {
                bo->placements[i].fpfn = 0;
                bo->placements[i].lpfn = 0;
        }
#endif
}

int vbox_bo_create(struct drm_device *dev, int size, int align,
                   u32 flags, struct vbox_bo **pvboxbo)
{
        struct vbox_private *vbox = dev->dev_private;
        struct vbox_bo *vboxbo;
        size_t acc_size;
        int ret;

        vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
        if (!vboxbo)
                return -ENOMEM;

        ret = drm_gem_object_init(dev, &vboxbo->gem, size);
        if (ret) {
                kfree(vboxbo);
                return ret;
        }

        vboxbo->bo.bdev = &vbox->ttm.bdev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
        vboxbo->bo.bdev->dev_mapping = dev->dev_mapping;
#endif

        vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

        acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
                                       sizeof(struct vbox_bo));

        ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
                          ttm_bo_type_device, &vboxbo->placement,
                          align >> PAGE_SHIFT, false, NULL, acc_size,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)
                          NULL,
#endif
                          NULL, vbox_bo_ttm_destroy);
        if (ret)
                return ret;

        *pvboxbo = vboxbo;

        return 0;
}

static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
{
        return bo->bo.offset;
}

int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
        int i, ret;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = vbox_bo_gpu_offset(bo);

                return 0;
        }

        vbox_ttm_placement(bo, pl_flag);

        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;

        bo->pin_count = 1;

        if (gpu_addr)
                *gpu_addr = vbox_bo_gpu_offset(bo);

        return 0;
}

int vbox_bo_unpin(struct vbox_bo *bo)
{
        int i, ret;

        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;

        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) &= ~TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;

        return 0;
}

/*
 * Move a vbox-owned buffer object to system memory if no one else has it
 * pinned. The caller must have pinned it previously, and this call will
 * release the caller's pin.
 */
int vbox_bo_push_sysram(struct vbox_bo *bo)
{
        int i, ret;

        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;

        if (bo->kmap.virtual)
                ttm_bo_kunmap(&bo->kmap);

        vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);

        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret) {
                DRM_ERROR("pushing to VRAM failed\n");
                return ret;
        }

        return 0;
}

int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct vbox_private *vbox;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
                return -EINVAL;

        file_priv = filp->private_data;
        vbox = file_priv->minor->dev->dev_private;

        return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
}
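The buffer-object helpers at the end of the file (vbox_bo_create(), vbox_bo_pin(), vbox_bo_unpin(), vbox_bo_push_sysram()) are the interface the rest of the driver uses to manage VRAM-backed objects. The sketch below shows how a caller might chain them to allocate and pin a scanout buffer; it is illustrative only and not part of this changeset, the function name example_alloc_scanout() is hypothetical, and the alignment/flags arguments are assumptions.

/*
 * Illustrative sketch (not part of r67406): allocate a buffer object and
 * pin it into VRAM so it can be scanned out.  Cleanup of the buffer object
 * on the pin-failure path is omitted for brevity.
 */
static int example_alloc_scanout(struct drm_device *dev, u32 size,
                                 struct vbox_bo **out_bo, u64 *gpu_addr)
{
        struct vbox_bo *bo;
        int ret;

        /* Create a GEM/TTM object that may live in VRAM or system RAM. */
        ret = vbox_bo_create(dev, PAGE_ALIGN(size), PAGE_SIZE, 0, &bo);
        if (ret)
                return ret;

        /* Pin into VRAM; the returned offset is the address used for scanout. */
        ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, gpu_addr);
        if (ret)
                return ret;

        *out_bo = bo;
        return 0;
}

When the buffer is no longer displayed, vbox_bo_unpin() drops the pin, and vbox_bo_push_sysram() additionally moves a previously pinned object back to system memory.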