/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/export.h>

#include "drmP.h"
#include "vmwgfx_drv.h"

#include "ttm/ttm_placement.h"

#define VMW_DIRTY_DELAY (HZ / 30)

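/*
 * Per-framebuffer private data. The framebuffer contents live in a
 * vmalloc'd system-memory shadow; dirty regions are copied into the
 * VRAM-backed TTM buffer and flushed to the device through the FIFO.
 */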
struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct vmw_dma_buffer *vmw_bo;
	struct ttm_bo_kmap_obj map;

	u32 pseudo_palette[17];

	unsigned depth;
	unsigned bpp;

	unsigned max_width;
	unsigned max_height;

	void *bo_ptr;
	unsigned bo_size;
	bool bo_iowrite;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;
};

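/*
 * fb_setcolreg op: store a truecolor pseudo-palette entry. Only the
 * 24 and 32 bit depths used by this driver are supported.
 */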
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue  & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
		return 1;
	}

	return 0;
}

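/*
 * fb_check_var op: validate and constrain a requested mode. Only
 * 32 bpp (at 24 or 32 bit depth) is accepted, and the requested
 * geometry must fit within the framebuffer and the available VRAM.
 */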
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    (var->xoffset != 0 || var->yoffset != 0)) {
		DRM_ERROR("Cannot handle panning without display topology\n");
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geometry cannot fit in framebuffer\n");
		return -EINVAL;
	}

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					info->fix.line_length,
					var->yoffset + var->yres)) {
		DRM_ERROR("Requested geometry cannot fit in framebuffer\n");
		return -EINVAL;
	}

	return 0;
}

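/*
 * fb_set_par op: program the SVGA device with the new mode, and set
 * up a single guest display on hosts that support display topology.
 */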
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;
	int ret;

	ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
				 info->fix.line_length,
				 par->bpp, par->depth);
	if (ret)
		return ret;

	if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
		/* TODO check if pitch and offset changes */
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	/* This warning is really helpful: if the FB offset is nonzero,
	 * the user can probably not see anything on the screen.
	 */
	WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);

	return 0;
}

static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

/*
 * Dirty code
 */

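/*
 * Copy the accumulated dirty region from the system-memory shadow
 * into the VRAM buffer and emit an SVGA_CMD_UPDATE so the host
 * refreshes the screen.
 */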
static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	int stride = (info->fix.line_length / 4);
	int *src = (int *)info->screen_base;
	__le32 __iomem *vram_mem = par->bo_ptr;
	unsigned long flags;
	unsigned x, y, w, h;
	int i, k;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	if (vmw_priv->suspended)
		return;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, flags);
		return;
	}
	x = par->dirty.x1;
	y = par->dirty.y1;
	w = min(par->dirty.x2, info->var.xres) - x;
	h = min(par->dirty.y2, info->var.yres) - y;
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	/* Copy the dirty columns of each scanline, from the first dirty
	 * row to the end of the shadow buffer, into VRAM. */
	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
		for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
			iowrite32(src[k], vram_mem + k);
	}

#if 0
	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif

	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return;
	}

	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
	cmd->body.x = cpu_to_le32(x);
	cmd->body.y = cpu_to_le32(y);
	cmd->body.width = cpu_to_le32(w);
	cmd->body.height = cpu_to_le32(h);
	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}

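/*
 * Grow the dirty rectangle to include the given region, scheduling
 * the deferred work if this is the first damage since the last flush.
 */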
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	struct fb_info *info = par->vmw_priv->fb_info;
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* If we are active, start the dirty work;
		 * the delayed work is shared with the defio system. */
		if (par->dirty.active)
			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}

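/*
 * Deferred I/O callback: convert the list of touched pages into a
 * full-width dirty band and flush it.
 */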
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);
	}

	vmw_fb_dirty_flush(par);
}

struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};

/*
 * Draw code
 */

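/*
 * The drawing ops wrap the generic cfb_* helpers and mark the touched
 * region dirty afterwards so it gets flushed to the device.
 */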
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}

/*
 * Bring up code
 */

static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};

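/*
 * Allocate a buffer object to back the framebuffer, placed in VRAM
 * and restricted to the first bo-size pages. The fbdev master lock
 * is held across the allocation.
 */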
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret;

	ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* interruptible? */
	ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
			      &ne_placement,
			      false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;

	ttm_write_unlock(&vmw_priv->fbdev_master.lock);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
	return ret;
}

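/*
 * Create and register the fbdev framebuffer: allocate the system
 * shadow and the VRAM buffer object, fill in the fixed and variable
 * screen info, and hook up deferred I/O for dirty tracking.
 */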
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned initial_width, initial_height;
	unsigned fb_width, fb_height;
	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
	int ret;

	/* XXX These shouldn't be hardcoded. */
	initial_width = 800;
	initial_height = 600;

	fb_bpp = 32;
	fb_depth = 24;

	/* XXX These shouldn't be hardcoded either. */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	initial_width = min(fb_width, initial_width);
	initial_height = min(fb_height, initial_height);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	par->vmw_priv = vmw_priv;
	par->depth = fb_depth;
	par->bpp = fb_bpp;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vmalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
	if (unlikely(ret != 0))
		goto err_free;

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	if (unlikely(ret != 0))
		goto err_unref;
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
	par->bo_size = fb_size;

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = par->vmalloc;
	info->screen_size = fb_size;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &vmw_fb_ops;

	/* 24 bit depth by default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = par->bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	info->var.xres = initial_width;
	info->var.yres = initial_height;

#if 0
	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#else
	info->pixmap.size = 0;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#endif

	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
	ttm_bo_kunmap(&par->map);
err_unref:
	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
	vfree(par->vmalloc);
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}

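/*
 * Tear down the framebuffer: unregister it and release the buffer
 * object, kmap and shadow memory.
 */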
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	struct ttm_buffer_object *bo;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;
	bo = &par->vmw_bo->base;
	par->vmw_bo = NULL;

	/* XXX: is this the right teardown order? */
	fb_deferred_io_cleanup(info);
	unregister_framebuffer(info);

	ttm_bo_kunmap(&par->map);
	ttm_bo_unref(&bo);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}

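/*
 * Stop dirty tracking, flush pending deferred work, then unmap and
 * unpin the framebuffer buffer object.
 */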
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_delayed_work_sync(&info->deferred_work);

	par->bo_ptr = NULL;
	ttm_bo_kunmap(&par->map);

	vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);

	return 0;
}

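/*
 * The reverse of vmw_fb_off: move the buffer object back to the start
 * of VRAM, remap it, re-enable dirty tracking and force a full refresh.
 */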
int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;
	bool dummy;
	int ret;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	/* We are already active */
	if (par->bo_ptr != NULL)
		return 0;

	/* Make sure that all overlays are stopped when we take over */
	vmw_overlay_stop_all(vmw_priv);

	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("could not move buffer to start of VRAM\n");
		goto err_no_buffer;
	}

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	BUG_ON(ret != 0);
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

err_no_buffer:
	vmw_fb_set_par(info);

	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

	/* If something was already dirty we won't
	 * schedule new work, so let's do it now */
	schedule_delayed_work(&info->deferred_work, 0);

	return 0;
}