drm: Nuke fb->bits_per_pixel
[linux-2.6-microblaze.git] / drivers / gpu / drm / vmwgfx / vmwgfx_fb.c
1 /**************************************************************************
2  *
3  * Copyright © 2007 David Airlie
4  * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28
29 #include <linux/export.h>
30
31 #include <drm/drmP.h>
32 #include "vmwgfx_drv.h"
33 #include "vmwgfx_kms.h"
34
35 #include <drm/ttm/ttm_placement.h>
36
37 #define VMW_DIRTY_DELAY (HZ / 30)
38
/*
 * Per-device fbdev state.
 *
 * The fbdev contents live in a vmalloc'd shadow buffer (@vmalloc) that
 * fbdev drawing and userspace mmap writes land in; vmw_fb_dirty_flush()
 * copies dirty regions from it into the kmapped buffer object backing
 * the KMS framebuffer and then signals KMS via the fb's ->dirty() hook.
 */
struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;			/* shadow buffer that drawing goes into */

	struct mutex bo_mutex;		/* serializes flushes vs. fb/bo (re)setup */
	struct vmw_dma_buffer *vmw_bo;	/* buffer object backing set_fb */
	struct ttm_bo_kmap_obj map;	/* kernel mapping of vmw_bo */
	void *bo_ptr;			/* virtual address of map; NULL if unmapped */
	unsigned bo_size;		/* size of vmw_bo in bytes */
	struct drm_framebuffer *set_fb;	/* currently set KMS framebuffer, or NULL */
	struct drm_display_mode *set_mode; /* currently set mode, or NULL */
	u32 fb_x;			/* pan offset into the shadow buffer */
	u32 fb_y;
	bool bo_iowrite;		/* set by ttm_kmap_obj_virtual() for the map */

	u32 pseudo_palette[17];

	unsigned max_width;
	unsigned max_height;

	struct {
		spinlock_t lock;	/* protects the rect below and @active */
		bool active;		/* cleared while fbdev output is off */
		unsigned x1;		/* dirty rect; empty when x1 == x2 */
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;

	struct drm_crtc *crtc;		/* crtc/connector the fbdev mode is set on */
	struct drm_connector *con;
	struct delayed_work local_work;	/* runs vmw_fb_dirty_flush() */
};
73
74 static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
75                             unsigned blue, unsigned transp,
76                             struct fb_info *info)
77 {
78         struct vmw_fb_par *par = info->par;
79         u32 *pal = par->pseudo_palette;
80
81         if (regno > 15) {
82                 DRM_ERROR("Bad regno %u.\n", regno);
83                 return 1;
84         }
85
86         switch (par->set_fb->format->depth) {
87         case 24:
88         case 32:
89                 pal[regno] = ((red & 0xff00) << 8) |
90                               (green & 0xff00) |
91                              ((blue  & 0xff00) >> 8);
92                 break;
93         default:
94                 DRM_ERROR("Bad depth %u, bpp %u.\n",
95                           par->set_fb->format->depth,
96                           par->set_fb->format->cpp[0] * 8);
97                 return 1;
98         }
99
100         return 0;
101 }
102
103 static int vmw_fb_check_var(struct fb_var_screeninfo *var,
104                             struct fb_info *info)
105 {
106         int depth = var->bits_per_pixel;
107         struct vmw_fb_par *par = info->par;
108         struct vmw_private *vmw_priv = par->vmw_priv;
109
110         switch (var->bits_per_pixel) {
111         case 32:
112                 depth = (var->transp.length > 0) ? 32 : 24;
113                 break;
114         default:
115                 DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
116                 return -EINVAL;
117         }
118
119         switch (depth) {
120         case 24:
121                 var->red.offset = 16;
122                 var->green.offset = 8;
123                 var->blue.offset = 0;
124                 var->red.length = 8;
125                 var->green.length = 8;
126                 var->blue.length = 8;
127                 var->transp.length = 0;
128                 var->transp.offset = 0;
129                 break;
130         case 32:
131                 var->red.offset = 16;
132                 var->green.offset = 8;
133                 var->blue.offset = 0;
134                 var->red.length = 8;
135                 var->green.length = 8;
136                 var->blue.length = 8;
137                 var->transp.length = 8;
138                 var->transp.offset = 24;
139                 break;
140         default:
141                 DRM_ERROR("Bad depth %u.\n", depth);
142                 return -EINVAL;
143         }
144
145         if ((var->xoffset + var->xres) > par->max_width ||
146             (var->yoffset + var->yres) > par->max_height) {
147                 DRM_ERROR("Requested geom can not fit in framebuffer\n");
148                 return -EINVAL;
149         }
150
151         if (!vmw_kms_validate_mode_vram(vmw_priv,
152                                         var->xres * var->bits_per_pixel/8,
153                                         var->yoffset + var->yres)) {
154                 DRM_ERROR("Requested geom can not fit in framebuffer\n");
155                 return -EINVAL;
156         }
157
158         return 0;
159 }
160
/* fb_blank hook: blanking is not implemented; report success and do nothing. */
static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}
165
166 /*
167  * Dirty code
168  */
169
/*
 * Worker for par->local_work: copy the accumulated dirty rectangle from
 * the vmalloc shadow buffer into the bo backing the KMS framebuffer,
 * then report the region through the framebuffer's ->dirty() hook.
 *
 * Takes bo_mutex for the whole copy so the fb/bo cannot be torn down
 * underneath it; the dirty rect itself is sampled and reset under
 * dirty.lock.
 */
static void vmw_fb_dirty_flush(struct work_struct *work)
{
	struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
					      local_work.work);
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	unsigned long irq_flags;
	s32 dst_x1, dst_x2, dst_y1, dst_y2, w, h;
	u32 cpp, max_x, max_y;
	struct drm_clip_rect clip;
	struct drm_framebuffer *cur_fb;
	u8 *src_ptr, *dst_ptr;

	if (vmw_priv->suspended)
		return;

	mutex_lock(&par->bo_mutex);
	cur_fb = par->set_fb;
	if (!cur_fb)
		goto out_unlock;

	spin_lock_irqsave(&par->dirty.lock, irq_flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
		goto out_unlock;
	}

	/*
	 * Handle panning when copying from vmalloc to framebuffer.
	 * Clip dirty area to framebuffer.
	 */
	cpp = cur_fb->format->cpp[0];
	max_x = par->fb_x + cur_fb->width;
	max_y = par->fb_y + cur_fb->height;

	dst_x1 = par->dirty.x1 - par->fb_x;
	dst_y1 = par->dirty.y1 - par->fb_y;
	dst_x1 = max_t(s32, dst_x1, 0);
	dst_y1 = max_t(s32, dst_y1, 0);

	dst_x2 = par->dirty.x2 - par->fb_x;
	dst_y2 = par->dirty.y2 - par->fb_y;
	dst_x2 = min_t(s32, dst_x2, max_x);
	dst_y2 = min_t(s32, dst_y2, max_y);
	w = dst_x2 - dst_x1;
	h = dst_y2 - dst_y1;
	w = max_t(s32, 0, w);
	h = max_t(s32, 0, h);

	/* Mark the rect empty (x1 == x2) before dropping the lock. */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, irq_flags);

	if (w && h) {
		dst_ptr = (u8 *)par->bo_ptr  +
			(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
		src_ptr = (u8 *)par->vmalloc +
			((dst_y1 + par->fb_y) * info->fix.line_length +
			 (dst_x1 + par->fb_x) * cpp);

		/* Copy line by line; source and destination pitch differ. */
		while (h-- > 0) {
			memcpy(dst_ptr, src_ptr, w*cpp);
			dst_ptr += par->set_fb->pitches[0];
			src_ptr += info->fix.line_length;
		}

		clip.x1 = dst_x1;
		clip.x2 = dst_x2;
		clip.y1 = dst_y1;
		clip.y2 = dst_y2;

		WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
						       &clip, 1));
		vmw_fifo_flush(vmw_priv, false);
	}
out_unlock:
	mutex_unlock(&par->bo_mutex);
}
248
249 static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
250                               unsigned x1, unsigned y1,
251                               unsigned width, unsigned height)
252 {
253         unsigned long flags;
254         unsigned x2 = x1 + width;
255         unsigned y2 = y1 + height;
256
257         spin_lock_irqsave(&par->dirty.lock, flags);
258         if (par->dirty.x1 == par->dirty.x2) {
259                 par->dirty.x1 = x1;
260                 par->dirty.y1 = y1;
261                 par->dirty.x2 = x2;
262                 par->dirty.y2 = y2;
263                 /* if we are active start the dirty work
264                  * we share the work with the defio system */
265                 if (par->dirty.active)
266                         schedule_delayed_work(&par->local_work,
267                                               VMW_DIRTY_DELAY);
268         } else {
269                 if (x1 < par->dirty.x1)
270                         par->dirty.x1 = x1;
271                 if (y1 < par->dirty.y1)
272                         par->dirty.y1 = y1;
273                 if (x2 > par->dirty.x2)
274                         par->dirty.x2 = x2;
275                 if (y2 > par->dirty.y2)
276                         par->dirty.y2 = y2;
277         }
278         spin_unlock_irqrestore(&par->dirty.lock, flags);
279 }
280
281 static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
282                               struct fb_info *info)
283 {
284         struct vmw_fb_par *par = info->par;
285
286         if ((var->xoffset + var->xres) > var->xres_virtual ||
287             (var->yoffset + var->yres) > var->yres_virtual) {
288                 DRM_ERROR("Requested panning can not fit in framebuffer\n");
289                 return -EINVAL;
290         }
291
292         mutex_lock(&par->bo_mutex);
293         par->fb_x = var->xoffset;
294         par->fb_y = var->yoffset;
295         if (par->set_fb)
296                 vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
297                                   par->set_fb->height);
298         mutex_unlock(&par->bo_mutex);
299
300         return 0;
301 }
302
303 static void vmw_deferred_io(struct fb_info *info,
304                             struct list_head *pagelist)
305 {
306         struct vmw_fb_par *par = info->par;
307         unsigned long start, end, min, max;
308         unsigned long flags;
309         struct page *page;
310         int y1, y2;
311
312         min = ULONG_MAX;
313         max = 0;
314         list_for_each_entry(page, pagelist, lru) {
315                 start = page->index << PAGE_SHIFT;
316                 end = start + PAGE_SIZE - 1;
317                 min = min(min, start);
318                 max = max(max, end);
319         }
320
321         if (min < max) {
322                 y1 = min / info->fix.line_length;
323                 y2 = (max / info->fix.line_length) + 1;
324
325                 spin_lock_irqsave(&par->dirty.lock, flags);
326                 par->dirty.x1 = 0;
327                 par->dirty.y1 = y1;
328                 par->dirty.x2 = info->var.xres;
329                 par->dirty.y2 = y2;
330                 spin_unlock_irqrestore(&par->dirty.lock, flags);
331
332                 /*
333                  * Since we've already waited on this work once, try to
334                  * execute asap.
335                  */
336                 cancel_delayed_work(&par->local_work);
337                 schedule_delayed_work(&par->local_work, 0);
338         }
339 };
340
/* Deferred I/O parameters: flush mmap-dirtied pages after VMW_DIRTY_DELAY. */
static struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};
345
346 /*
347  * Draw code
348  */
349
/* fb_fillrect hook: draw via the generic helper, then mark the area dirty. */
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}
356
/* fb_copyarea hook: copy via the generic helper, then mark the area dirty. */
static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}
363
/* fb_imageblit hook: blit via the generic helper, then mark the area dirty. */
static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}
370
371 /*
372  * Bring up code
373  */
374
375 static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
376                             size_t size, struct vmw_dma_buffer **out)
377 {
378         struct vmw_dma_buffer *vmw_bo;
379         int ret;
380
381         (void) ttm_write_lock(&vmw_priv->reservation_sem, false);
382
383         vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
384         if (!vmw_bo) {
385                 ret = -ENOMEM;
386                 goto err_unlock;
387         }
388
389         ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
390                               &vmw_sys_placement,
391                               false,
392                               &vmw_dmabuf_bo_free);
393         if (unlikely(ret != 0))
394                 goto err_unlock; /* init frees the buffer on failure */
395
396         *out = vmw_bo;
397         ttm_write_unlock(&vmw_priv->reservation_sem);
398
399         return 0;
400
401 err_unlock:
402         ttm_write_unlock(&vmw_priv->reservation_sem);
403         return ret;
404 }
405
406 static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
407                                 int *depth)
408 {
409         switch (var->bits_per_pixel) {
410         case 32:
411                 *depth = (var->transp.length > 0) ? 32 : 24;
412                 break;
413         default:
414                 DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
415                 return -EINVAL;
416         }
417
418         return 0;
419 }
420
/*
 * Tear down the current fbdev KMS setup.
 *
 * Unsets the mode on the fbdev crtc (if one is set), drops the
 * reference to the current framebuffer, and — when @detach_bo is true —
 * unmaps the backing buffer object and either drops our reference to it
 * (@unref_bo true) or merely unpins it so it can be reused later.
 *
 * Returns 0 on success or the error from unsetting the mode.
 */
static int vmw_fb_kms_detach(struct vmw_fb_par *par,
			     bool detach_bo,
			     bool unref_bo)
{
	struct drm_framebuffer *cur_fb = par->set_fb;
	int ret;

	/* Detach the KMS framebuffer from crtcs */
	if (par->set_mode) {
		struct drm_mode_set set;

		set.crtc = par->crtc;
		set.x = 0;
		set.y = 0;
		set.mode = NULL;
		set.fb = NULL;
		set.num_connectors = 1;
		set.connectors = &par->con;
		ret = drm_mode_set_config_internal(&set);
		if (ret) {
			DRM_ERROR("Could not unset a mode.\n");
			return ret;
		}
		drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
		par->set_mode = NULL;
	}

	if (cur_fb) {
		drm_framebuffer_unreference(cur_fb);
		par->set_fb = NULL;
	}

	if (par->vmw_bo && detach_bo) {
		/* Unmap before unpinning/releasing the bo. */
		if (par->bo_ptr) {
			ttm_bo_kunmap(&par->map);
			par->bo_ptr = NULL;
		}
		if (unref_bo)
			vmw_dmabuf_unreference(&par->vmw_bo);
		else
			vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false);
	}

	return 0;
}
466
467 static int vmw_fb_kms_framebuffer(struct fb_info *info)
468 {
469         struct drm_mode_fb_cmd2 mode_cmd;
470         struct vmw_fb_par *par = info->par;
471         struct fb_var_screeninfo *var = &info->var;
472         struct drm_framebuffer *cur_fb;
473         struct vmw_framebuffer *vfb;
474         int ret = 0, depth;
475         size_t new_bo_size;
476
477         ret = vmw_fb_compute_depth(var, &depth);
478         if (ret)
479                 return ret;
480
481         mode_cmd.width = var->xres;
482         mode_cmd.height = var->yres;
483         mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
484         mode_cmd.pixel_format =
485                 drm_mode_legacy_fb_format(var->bits_per_pixel,
486                         ((var->bits_per_pixel + 7) / 8) * mode_cmd.width);
487
488         cur_fb = par->set_fb;
489         if (cur_fb && cur_fb->width == mode_cmd.width &&
490             cur_fb->height == mode_cmd.height &&
491             cur_fb->pixel_format == mode_cmd.pixel_format &&
492             cur_fb->pitches[0] == mode_cmd.pitches[0])
493                 return 0;
494
495         /* Need new buffer object ? */
496         new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
497         ret = vmw_fb_kms_detach(par,
498                                 par->bo_size < new_bo_size ||
499                                 par->bo_size > 2*new_bo_size,
500                                 true);
501         if (ret)
502                 return ret;
503
504         if (!par->vmw_bo) {
505                 ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
506                                        &par->vmw_bo);
507                 if (ret) {
508                         DRM_ERROR("Failed creating a buffer object for "
509                                   "fbdev.\n");
510                         return ret;
511                 }
512                 par->bo_size = new_bo_size;
513         }
514
515         vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
516                                       true, &mode_cmd);
517         if (IS_ERR(vfb))
518                 return PTR_ERR(vfb);
519
520         par->set_fb = &vfb->base;
521
522         return 0;
523 }
524
/*
 * fb_set_par hook: build a display mode from info->var, (re)create the
 * KMS framebuffer, set the mode on the fbdev crtc, and make sure the
 * backing bo is pinned and kmapped for the flush worker.
 *
 * On success par->set_mode holds the active mode; a previous mode is
 * destroyed.  Note the mode is also stored (and old_mode destroyed) on
 * the error paths that reach out_unlock.
 */
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct drm_mode_set set;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
		DRM_MODE_TYPE_DRIVER,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	struct drm_display_mode *old_mode;
	struct drm_display_mode *mode;
	int ret;

	old_mode = par->set_mode;
	mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
	if (!mode) {
		DRM_ERROR("Could not create new fb mode.\n");
		return -ENOMEM;
	}

	/* Only the resolution comes from var; timings are synthesized. */
	mode->hdisplay = var->xres;
	mode->vdisplay = var->yres;
	vmw_guess_mode_timing(mode);

	if (old_mode && drm_mode_equal(old_mode, mode)) {
		/* Same mode as before: reuse it, nothing old to destroy. */
		drm_mode_destroy(vmw_priv->dev, mode);
		mode = old_mode;
		old_mode = NULL;
	} else if (!vmw_kms_validate_mode_vram(vmw_priv,
					mode->hdisplay *
					DIV_ROUND_UP(var->bits_per_pixel, 8),
					mode->vdisplay)) {
		drm_mode_destroy(vmw_priv->dev, mode);
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	drm_modeset_lock_all(vmw_priv->dev);
	ret = vmw_fb_kms_framebuffer(info);
	if (ret)
		goto out_unlock;

	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;

	set.crtc = par->crtc;
	set.x = 0;
	set.y = 0;
	set.mode = mode;
	set.fb = par->set_fb;
	set.num_connectors = 1;
	set.connectors = &par->con;

	ret = drm_mode_set_config_internal(&set);
	if (ret)
		goto out_unlock;

	if (!par->bo_ptr) {
		struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb);

		/*
		 * Pin before mapping. Since we don't know in what placement
		 * to pin, call into KMS to do it for us.
		 */
		ret = vfb->pin(vfb);
		if (ret) {
			DRM_ERROR("Could not pin the fbdev framebuffer.\n");
			goto out_unlock;
		}

		ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
				  par->vmw_bo->base.num_pages, &par->map);
		if (ret) {
			vfb->unpin(vfb);
			DRM_ERROR("Could not map the fbdev framebuffer.\n");
			goto out_unlock;
		}

		par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
	}


	/* Redraw the whole framebuffer from the shadow buffer. */
	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
			  par->set_fb->width, par->set_fb->height);

	/* If there already was stuff dirty we wont
	 * schedule a new work, so lets do it now */

	schedule_delayed_work(&par->local_work, 0);

out_unlock:
	if (old_mode)
		drm_mode_destroy(vmw_priv->dev, old_mode);
	par->set_mode = mode;

	drm_modeset_unlock_all(vmw_priv->dev);
	mutex_unlock(&par->bo_mutex);

	return ret;
}
627
628
/* fbdev entry points; the drawing ops wrap cfb_* helpers and mark dirty. */
static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};
640
/*
 * Bring up the fbdev emulation: allocate the fb_info and vmw_fb_par,
 * query an initial mode from KMS, allocate the vmalloc shadow buffer,
 * fill in the fixed/variable fbdev data, wire up deferred I/O, register
 * the framebuffer and perform the initial mode set.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated here is released and vmw_priv->fb_info is reset to NULL.
 */
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned fb_width, fb_height;
	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
	struct drm_display_mode *init_mode;
	int ret;

	fb_bpp = 32;
	fb_depth = 24;

	/* XXX As shouldn't these be as well. */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	/* Shadow buffer is sized for the maximum supported geometry. */
	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	memset(par, 0, sizeof(*par));
	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
	par->vmw_priv = vmw_priv;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	/* Ask KMS for the crtc/connector and an initial mode to use. */
	drm_modeset_lock_all(vmw_priv->dev);
	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
				      par->max_height, &par->con,
				      &par->crtc, &init_mode);
	if (ret) {
		drm_modeset_unlock_all(vmw_priv->dev);
		goto err_kms;
	}

	info->var.xres = init_mode->hdisplay;
	info->var.yres = init_mode->vdisplay;
	drm_modeset_unlock_all(vmw_priv->dev);

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vzalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	/* Userspace maps the vmalloc shadow buffer, not VRAM. */
	info->screen_base = (char __iomem *)par->vmalloc;
	info->screen_size = fb_size;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &vmw_fb_ops;

	/* 24 depth per default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = fb_bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	mutex_init(&par->bo_mutex);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	vmw_fb_set_par(info);

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
err_free:
	vfree(par->vmalloc);
err_kms:
	/* framebuffer_release() also frees par and info->apertures. */
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}
781
/*
 * fbdev teardown at driver unload: stop deferred I/O and the flush
 * worker, unregister from the fbdev core, release the KMS setup and
 * drop the bo reference, then free the shadow buffer and the fb_info.
 * Returns 0 (also when no fbdev was ever set up).
 */
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;

	/*
	 * NOTE(review): original comment questioned this ordering ("??? order").
	 * Deferred io and the flush worker are stopped before unregistering;
	 * exact ordering constraints vs. unregister_framebuffer() unverified.
	 */
	fb_deferred_io_cleanup(info);
	cancel_delayed_work_sync(&par->local_work);
	unregister_framebuffer(info);

	(void) vmw_fb_kms_detach(par, true, true);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}
805
/*
 * Temporarily disable fbdev output: deactivate dirty tracking, wait for
 * any pending deferred-io and flush work, then drop the mode/framebuffer
 * and unpin the bo.  The bo itself is kept (unref_bo == false) so
 * vmw_fb_on() can bring the fbdev back.  Returns 0, or -EINVAL when no
 * fbdev is set up.
 */
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	/* Stop new flushes from being scheduled before draining work. */
	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_delayed_work(&info->deferred_work);
	flush_delayed_work(&par->local_work);

	mutex_lock(&par->bo_mutex);
	(void) vmw_fb_kms_detach(par, true, false);
	mutex_unlock(&par->bo_mutex);

	return 0;
}
831
832 int vmw_fb_on(struct vmw_private *vmw_priv)
833 {
834         struct fb_info *info;
835         struct vmw_fb_par *par;
836         unsigned long flags;
837
838         if (!vmw_priv->fb_info)
839                 return -EINVAL;
840
841         info = vmw_priv->fb_info;
842         par = info->par;
843
844         vmw_fb_set_par(info);
845         spin_lock_irqsave(&par->dirty.lock, flags);
846         par->dirty.active = true;
847         spin_unlock_irqrestore(&par->dirty.lock, flags);
848  
849         return 0;
850 }