Changeset 67403 in vbox for trunk/src/VBox/Additions/linux
- Timestamp: Jun 14, 2017 1:15:12 PM (8 years ago)
- File: 1 edited
trunk/src/VBox/Additions/linux/drm/vbox_main.c
r67269 → r67403

static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
        struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);

        if (vbox_fb->obj)
                drm_gem_object_unreference_unlocked(vbox_fb->obj);

        drm_framebuffer_cleanup(fb);
        kfree(fb);
}

void vbox_enable_accel(struct vbox_private *vbox)
{
        unsigned int i;
        struct VBVABUFFER *vbva;

        if (!vbox->vbva_info || !vbox->vbva_buffers) {
                /* Should never happen... */
                DRM_ERROR("vboxvideo: failed to set up VBVA.\n");
                return;
        }

        for (i = 0; i < vbox->num_crtcs; ++i) {
                if (!vbox->vbva_info[i].pVBVA) {
                        vbva = (struct VBVABUFFER *)
                                ((u8 *)vbox->vbva_buffers +
                                 i * VBVA_MIN_BUFFER_SIZE);
                        if (!VBoxVBVAEnable(&vbox->vbva_info[i],
                                            vbox->guest_pool, vbva, i)) {
                                /* very old host or driver error. */
                                DRM_ERROR("vboxvideo: VBoxVBVAEnable failed - heap allocation error.\n");
                                return;
                        }
                }
        }
}

void vbox_disable_accel(struct vbox_private *vbox)
{
        unsigned int i;

        for (i = 0; i < vbox->num_crtcs; ++i)
                VBoxVBVADisable(&vbox->vbva_info[i], vbox->guest_pool, i);
}

void vbox_report_caps(struct vbox_private *vbox)
{
        u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION
                 | VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY;
        if (vbox->initial_mode_queried)
                caps |= VBVACAPS_VIDEO_MODE_HINTS;
        VBoxHGSMISendCapsInfo(vbox->guest_pool, caps);
}
/**
 * Send information about dirty rectangles to VBVA. If necessary we enable
 * VBVA first, as this is normally disabled after a change of master in case
 * the new master does not send dirty rectangle information (is this even
 * allowed?)
 */
void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
                                       struct drm_clip_rect *rects,
                                       unsigned int num_rects)
{
        struct vbox_private *vbox = fb->dev->dev_private;
        struct drm_crtc *crtc;
        unsigned int i;

        mutex_lock(&vbox->hw_mutex);
        list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
                if (CRTC_FB(crtc) == fb) {
                        vbox_enable_accel(vbox);
                        for (i = 0; i < num_rects; ++i) {
                                VBVACMDHDR cmd_hdr;
                                unsigned int crtc_id =
                                        to_vbox_crtc(crtc)->crtc_id;

                                if ((rects[i].x1 >
                                        crtc->x + crtc->hwmode.hdisplay) ||
                                    (rects[i].y1 >
                                        crtc->y + crtc->hwmode.vdisplay) ||
                                    (rects[i].x2 < crtc->x) ||
                                    (rects[i].y2 < crtc->y))
                                        continue;

                                cmd_hdr.x = (s16)rects[i].x1;
                                cmd_hdr.y = (s16)rects[i].y1;
                                cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1;
                                cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1;

                                if (VBoxVBVABufferBeginUpdate(
                                            &vbox->vbva_info[crtc_id],
                                            vbox->guest_pool)) {
                                        VBoxVBVAWrite(&vbox->vbva_info[crtc_id],
                                                      vbox->guest_pool,
                                                      &cmd_hdr,
                                                      sizeof(cmd_hdr));
                                        VBoxVBVABufferEndUpdate(
                                                &vbox->vbva_info[crtc_id]);
                                }
                        }
                }
        }
        mutex_unlock(&vbox->hw_mutex);
}

static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
                                       struct drm_file *file_priv,
                                       unsigned int flags, unsigned int color,
                                       struct drm_clip_rect *rects,
                                       unsigned int num_rects)
{
        vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);

        return 0;
}

static const struct drm_framebuffer_funcs vbox_fb_funcs = {
        .destroy = vbox_user_framebuffer_destroy,
        .dirty = vbox_user_framebuffer_dirty,
};
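The skip test in vbox_framebuffer_dirty_rectangles() only forwards rectangles that can intersect the region a CRTC scans out (its x/y origin plus the hwmode width and height). Below is a minimal standalone sketch of the same test; the struct names are illustrative stand-ins, not the DRM types.

        /* Standalone sketch, not driver code: a dirty rectangle is skipped when
         * it lies entirely outside the viewport covered by a CRTC. */
        #include <stdbool.h>
        #include <stdio.h>

        struct rect { int x1, y1, x2, y2; };
        struct viewport { int x, y, width, height; };

        static bool rect_touches_viewport(const struct rect *r,
                                          const struct viewport *v)
        {
                return !(r->x1 > v->x + v->width || r->y1 > v->y + v->height ||
                         r->x2 < v->x || r->y2 < v->y);
        }

        int main(void)
        {
                struct viewport crtc = { .x = 0, .y = 0, .width = 1024, .height = 768 };
                struct rect inside = { 100, 100, 200, 200 };
                struct rect outside = { 1100, 100, 1200, 200 };

                printf("inside: %d, outside: %d\n",
                       rect_touches_viewport(&inside, &crtc),
                       rect_touches_viewport(&outside, &crtc));
                return 0;
        }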
int vbox_framebuffer_init(struct drm_device *dev,
                          struct vbox_framebuffer *vbox_fb,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
                          const
#endif
                          struct DRM_MODE_FB_CMD *mode_cmd,
                          struct drm_gem_object *obj)
{
        int ret;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
        drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd);
#else
        drm_helper_mode_fill_fb_struct(&vbox_fb->base, mode_cmd);
#endif
        vbox_fb->obj = obj;
        ret = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs);
        if (ret) {
                DRM_ERROR("framebuffer init failed %d\n", ret);
                return ret;
        }

        return 0;
}

static struct drm_framebuffer *vbox_user_framebuffer_create(
                struct drm_device *dev,
                struct drm_file *filp,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
                const struct drm_mode_fb_cmd2 *mode_cmd)
#else
                struct drm_mode_fb_cmd2 *mode_cmd)
#endif
{
        struct drm_gem_object *obj;
        struct vbox_framebuffer *vbox_fb;
        int ret;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
        obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
#else
        obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
#endif
        if (!obj)
                return ERR_PTR(-ENOENT);

        vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
        if (!vbox_fb) {
                drm_gem_object_unreference_unlocked(obj);
                return ERR_PTR(-ENOMEM);
        }

        ret = vbox_framebuffer_init(dev, vbox_fb, mode_cmd, obj);
        if (ret) {
                drm_gem_object_unreference_unlocked(obj);
                kfree(vbox_fb);
                return ERR_PTR(ret);
        }

        return &vbox_fb->base;
}

static const struct drm_mode_config_funcs vbox_mode_funcs = {
        .fb_create = vbox_user_framebuffer_create,
};

static void vbox_accel_fini(struct vbox_private *vbox)
{
        if (vbox->vbva_info) {
                vbox_disable_accel(vbox);
                kfree(vbox->vbva_info);
                vbox->vbva_info = NULL;
        }
        if (vbox->vbva_buffers) {
                pci_iounmap(vbox->dev->pdev, vbox->vbva_buffers);
                vbox->vbva_buffers = NULL;
        }
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
#define pci_iomap_range(dev, bar, offset, maxlen) \
        ioremap(pci_resource_start(dev, bar) + (offset), maxlen)
#endif
static int vbox_accel_init(struct vbox_private *vbox)
{
        unsigned int i;

        vbox->vbva_info = kcalloc(vbox->num_crtcs, sizeof(*vbox->vbva_info),
                                  GFP_KERNEL);
        if (!vbox->vbva_info)
                return -ENOMEM;

        /* Take a command buffer for each screen from the end of usable VRAM. */
        vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;

        vbox->vbva_buffers = pci_iomap_range(vbox->dev->pdev, 0,
                                             vbox->available_vram_size,
                                             vbox->num_crtcs *
                                             VBVA_MIN_BUFFER_SIZE);
        if (!vbox->vbva_buffers)
                return -ENOMEM;

        for (i = 0; i < vbox->num_crtcs; ++i)
                VBoxVBVASetupBufferContext(&vbox->vbva_info[i],
                                           vbox->available_vram_size +
                                           i * VBVA_MIN_BUFFER_SIZE,
                                           VBVA_MIN_BUFFER_SIZE);

        return 0;
}

…

static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
{
        u32 have_hints, have_cursor;
        int ret;

        ret = VBoxQueryConfHGSMI(vbox->guest_pool,
                                 VBOX_VBVA_CONF32_MODE_HINT_REPORTING,
                                 &have_hints);
        if (RT_FAILURE(ret))
                return false;

        ret = VBoxQueryConfHGSMI(vbox->guest_pool,
                                 VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING,
                                 &have_cursor);
        if (RT_FAILURE(ret))
                return false;

        return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS;
}
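vbox_accel_init() above carves one fixed-size VBVA command buffer per screen from the end of usable VRAM, and vbox_enable_accel() later addresses buffer i at base + i * VBVA_MIN_BUFFER_SIZE. A small standalone sketch of that layout arithmetic follows; the constants are illustrative stand-ins, not the driver's real values.

        /* Standalone sketch, not driver code: per-screen buffers taken from the
         * tail of a VRAM-sized region. */
        #include <stdint.h>
        #include <stdio.h>

        #define NUM_SCREENS      4
        #define BUFFER_SIZE      (64 * 1024)          /* stand-in for VBVA_MIN_BUFFER_SIZE */
        #define USABLE_VRAM_SIZE (16 * 1024 * 1024)   /* stand-in value */

        int main(void)
        {
                uint32_t buffers_offset = USABLE_VRAM_SIZE - NUM_SCREENS * BUFFER_SIZE;
                unsigned int i;

                for (i = 0; i < NUM_SCREENS; i++)
                        printf("screen %u: buffer at VRAM offset 0x%08x\n",
                               i, buffers_offset + i * BUFFER_SIZE);
                return 0;
        }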
/**
 * Set up our heaps and data exchange buffers in VRAM before handing the rest
 * to the memory manager.
 */
static int vbox_hw_init(struct vbox_private *vbox)
{
        int ret;

        vbox->full_vram_size = VBoxVideoGetVRAMSize();
        vbox->any_pitch = VBoxVideoAnyWidthAllowed();

        DRM_INFO("VRAM %08x\n", vbox->full_vram_size);

        /* Map guest-heap at end of vram */
        vbox->guest_heap =
                pci_iomap_range(vbox->dev->pdev, 0, GUEST_HEAP_OFFSET(vbox),
                                GUEST_HEAP_SIZE);
        if (!vbox->guest_heap)
                return -ENOMEM;

        /* Create guest-heap mem-pool use 2^4 = 16 byte chunks */
        vbox->guest_pool = gen_pool_create(4, -1);
        if (!vbox->guest_pool)
                return -ENOMEM;

        ret = gen_pool_add_virt(vbox->guest_pool,
                                (unsigned long)vbox->guest_heap,
                                GUEST_HEAP_OFFSET(vbox),
                                GUEST_HEAP_USABLE_SIZE, -1);
        if (ret)
                return ret;

        /* Reduce available VRAM size to reflect the guest heap. */
        vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox);
        /* Linux drm represents monitors as a 32-bit array. */
        vbox->num_crtcs = min_t(u32, VBoxHGSMIGetMonitorCount(vbox->guest_pool),
                                VBOX_MAX_SCREENS);

        if (!have_hgsmi_mode_hints(vbox))
                return -ENOTSUPP;

        vbox->last_mode_hints =
                kcalloc(vbox->num_crtcs, sizeof(VBVAMODEHINT), GFP_KERNEL);
        if (!vbox->last_mode_hints)
                return -ENOMEM;

        return vbox_accel_init(vbox);
}

static void vbox_hw_fini(struct vbox_private *vbox)
{
        vbox_accel_fini(vbox);
        kfree(vbox->last_mode_hints);
        vbox->last_mode_hints = NULL;
}
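vbox_hw_init() hands the mapped guest heap to a genalloc pool with a 2^4 = 16 byte minimum allocation granularity. Below is a hedged, self-contained kernel-module sketch of the same genalloc calls, using an ordinary kzalloc'd buffer instead of mapped VRAM; the names and sizes are illustrative.

        /* Sketch, not driver code: manage a small buffer with a genalloc pool. */
        #include <linux/module.h>
        #include <linux/genalloc.h>
        #include <linux/slab.h>

        #define DEMO_HEAP_SIZE 4096

        static struct gen_pool *demo_pool;
        static void *demo_heap;

        static int __init genpool_demo_init(void)
        {
                unsigned long chunk;
                int ret;

                demo_heap = kzalloc(DEMO_HEAP_SIZE, GFP_KERNEL);
                if (!demo_heap)
                        return -ENOMEM;

                /* 2^4 = 16 byte minimum chunk size, any NUMA node. */
                demo_pool = gen_pool_create(4, -1);
                if (!demo_pool) {
                        kfree(demo_heap);
                        return -ENOMEM;
                }

                ret = gen_pool_add(demo_pool, (unsigned long)demo_heap,
                                   DEMO_HEAP_SIZE, -1);
                if (ret) {
                        gen_pool_destroy(demo_pool);
                        kfree(demo_heap);
                        return ret;
                }

                /* Sub-allocate and release a 64 byte chunk from the pool. */
                chunk = gen_pool_alloc(demo_pool, 64);
                if (chunk)
                        gen_pool_free(demo_pool, chunk, 64);

                return 0;
        }

        static void __exit genpool_demo_exit(void)
        {
                gen_pool_destroy(demo_pool);
                kfree(demo_heap);
        }

        module_init(genpool_demo_init);
        module_exit(genpool_demo_exit);
        MODULE_LICENSE("GPL");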
int vbox_driver_load(struct drm_device *dev, unsigned long flags)
{
        struct vbox_private *vbox;
        int ret = 0;

        if (!VBoxHGSMIIsSupported())
                return -ENODEV;

        vbox = kzalloc(sizeof(*vbox), GFP_KERNEL);
        if (!vbox)
                return -ENOMEM;

        dev->dev_private = vbox;
        vbox->dev = dev;

        mutex_init(&vbox->hw_mutex);

        ret = vbox_hw_init(vbox);
        if (ret)
                goto out_free;

        ret = vbox_mm_init(vbox);
        if (ret)
                goto out_free;

        drm_mode_config_init(dev);

        dev->mode_config.funcs = (void *)&vbox_mode_funcs;
        dev->mode_config.min_width = 64;
        dev->mode_config.min_height = 64;
        dev->mode_config.preferred_depth = 24;
        dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
        dev->mode_config.max_height = VBE_DISPI_MAX_YRES;

        ret = vbox_mode_init(dev);
        if (ret)
                goto out_free;

        ret = vbox_irq_init(vbox);
        if (ret)
                goto out_free;

        ret = vbox_fbdev_init(dev);
        if (ret)
                goto out_free;

        return 0;

out_free:
        vbox_driver_unload(dev);
        return ret;
}

…

#endif
{
        struct vbox_private *vbox = dev->dev_private;

        vbox_fbdev_fini(dev);
        vbox_irq_fini(vbox);
        vbox_mode_fini(dev);
        if (dev->mode_config.funcs)
                drm_mode_config_cleanup(dev);

        vbox_hw_fini(vbox);
        vbox_mm_fini(vbox);
        if (vbox->guest_pool)
                gen_pool_destroy(vbox->guest_pool);
        if (vbox->guest_heap)
                pci_iounmap(dev->pdev, vbox->guest_heap);
        kfree(vbox);
        dev->dev_private = NULL;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
        return 0;
#endif
}

/**
 * @note this is described in the DRM framework documentation.  AST does not
 * have it, but we get an oops on driver unload if it is not present.
 */
void vbox_driver_lastclose(struct drm_device *dev)
{
        struct vbox_private *vbox = dev->dev_private;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
        if (vbox->fbdev)
                drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper);
#else
        drm_modeset_lock_all(dev);
        if (vbox->fbdev)
                drm_fb_helper_restore_fbdev_mode(&vbox->fbdev->helper);
        drm_modeset_unlock_all(dev);
#endif
}

int vbox_gem_create(struct drm_device *dev,
                    u32 size, bool iskernel, struct drm_gem_object **obj)
{
        struct vbox_bo *vboxbo;
        int ret;

        *obj = NULL;

        size = roundup(size, PAGE_SIZE);
        if (size == 0)
                return -EINVAL;

        ret = vbox_bo_create(dev, size, 0, 0, &vboxbo);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("failed to allocate GEM object\n");
                return ret;
        }

        *obj = &vboxbo->gem;

        return 0;
}
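vbox_driver_load() above funnels every failure to a single out_free label that calls vbox_driver_unload(), and the unload path checks each resource before releasing it, so it is safe to run after a partial initialisation. A minimal standalone sketch of that pattern, with illustrative resource names, follows.

        /* Sketch, not driver code: one teardown routine serves both the error
         * path of load and the normal unload path. */
        #include <stdlib.h>

        struct demo_dev {
                void *heap;
                void *pool;
        };

        static void demo_unload(struct demo_dev *dev)
        {
                /* Tolerates partially initialised state. */
                free(dev->pool);
                free(dev->heap);
                dev->pool = NULL;
                dev->heap = NULL;
        }

        static int demo_load(struct demo_dev *dev)
        {
                dev->heap = malloc(4096);
                if (!dev->heap)
                        goto out_free;

                dev->pool = malloc(256);
                if (!dev->pool)
                        goto out_free;

                return 0;

        out_free:
                demo_unload(dev);
                return -1;
        }

        int main(void)
        {
                struct demo_dev dev = { 0 };

                if (demo_load(&dev))
                        return 1;
                demo_unload(&dev);
                return 0;
        }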
int vbox_dumb_create(struct drm_file *file,
                     struct drm_device *dev, struct drm_mode_create_dumb *args)
{
        int ret;
        struct drm_gem_object *gobj;
        u32 handle;

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        ret = vbox_gem_create(dev, args->size, false, &gobj);
        if (ret)
                return ret;

        ret = drm_gem_handle_create(file, gobj, &handle);
        drm_gem_object_unreference_unlocked(gobj);
        if (ret)
                return ret;

        args->handle = handle;

        return 0;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
int vbox_dumb_destroy(struct drm_file *file,
                      struct drm_device *dev, u32 handle)
{
        return drm_gem_handle_delete(file, handle);
}
#endif

…

static void vbox_bo_unref(struct vbox_bo **bo)
{
        struct ttm_buffer_object *tbo;

        if ((*bo) == NULL)
                return;

        tbo = &((*bo)->bo);
        ttm_bo_unref(&tbo);
        if (!tbo)
                *bo = NULL;
}

void vbox_gem_free_object(struct drm_gem_object *obj)
{
        struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);

        vbox_bo_unref(&vbox_bo);
}

static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
        return bo->bo.addr_space_offset;
#else
        return drm_vma_node_offset_addr(&bo->bo.vma_node);
#endif
}

int
vbox_dumb_mmap_offset(struct drm_file *file,
                      struct drm_device *dev,
                      u32 handle, u64 *offset)
{
        struct drm_gem_object *obj;
        int ret;
        struct vbox_bo *bo;

        mutex_lock(&dev->struct_mutex);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
        obj = drm_gem_object_lookup(file, handle);
#else
        obj = drm_gem_object_lookup(dev, file, handle);
#endif
        if (!obj) {
                ret = -ENOENT;
                goto out_unlock;
        }

        bo = gem_to_vbox_bo(obj);
        *offset = vbox_bo_mmap_offset(bo);

        drm_gem_object_unreference(obj);
        ret = 0;

out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
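vbox_dumb_create() and vbox_dumb_mmap_offset() implement the kernel side of the standard DRM dumb-buffer interface; the pitch and size returned to userspace follow the width * bytes-per-pixel computation above. Below is a hedged userspace sketch of how a client exercises those paths through the generic ioctls. The device path and header locations are assumptions and error handling is minimal.

        /* Sketch, not driver code: create a dumb buffer, query its mmap offset,
         * map it, then destroy it. */
        #include <stdio.h>
        #include <string.h>
        #include <fcntl.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <sys/mman.h>
        #include <drm/drm.h>
        #include <drm/drm_mode.h>

        int main(void)
        {
                struct drm_mode_create_dumb create = { .width = 1024, .height = 768, .bpp = 32 };
                struct drm_mode_map_dumb map = { 0 };
                struct drm_mode_destroy_dumb destroy = { 0 };
                void *fb;
                int fd = open("/dev/dri/card0", O_RDWR);

                if (fd < 0)
                        return 1;

                /* Kernel fills in handle, pitch and size (see vbox_dumb_create). */
                if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
                        return 1;
                printf("pitch=%u size=%llu\n", create.pitch,
                       (unsigned long long)create.size);

                /* Ask for the fake mmap offset of this buffer object... */
                map.handle = create.handle;
                if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
                        return 1;

                /* ...and map it so the CPU can write pixels into it. */
                fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
                          fd, map.offset);
                if (fb != MAP_FAILED) {
                        memset(fb, 0, create.size);
                        munmap(fb, create.size);
                }

                destroy.handle = create.handle;
                ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy);
                close(fd);
                return 0;
        }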