1 // SPDX-License-Identifier: MIT
3 #include <linux/moduleparam.h>
4 #include <linux/vmalloc.h>
6 #include <drm/drm_crtc_helper.h>
7 #include <drm/drm_drv.h>
8 #include <drm/drm_fb_helper.h>
9 #include <drm/drm_framebuffer.h>
10 #include <drm/drm_print.h>
12 #include <drm/drm_fbdev_generic.h>
14 static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
16 struct drm_device *dev = fb_helper->dev;
17 struct drm_framebuffer *fb = fb_helper->fb;
19 return dev->mode_config.prefer_shadow_fbdev ||
20 dev->mode_config.prefer_shadow ||
24 /* @user: 1=userspace, 0=fbcon */
25 static int drm_fbdev_fb_open(struct fb_info *info, int user)
27 struct drm_fb_helper *fb_helper = info->par;
29 /* No need to take a ref for fbcon because it unbinds on unregister */
30 if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
36 static int drm_fbdev_fb_release(struct fb_info *info, int user)
38 struct drm_fb_helper *fb_helper = info->par;
41 module_put(fb_helper->dev->driver->fops->owner);
46 static void drm_fbdev_fb_destroy(struct fb_info *info)
48 struct drm_fb_helper *fb_helper = info->par;
55 fb_deferred_io_cleanup(info);
56 if (drm_fbdev_use_shadow_fb(fb_helper))
57 shadow = info->screen_buffer;
59 drm_fb_helper_fini(fb_helper);
63 else if (fb_helper->buffer)
64 drm_client_buffer_vunmap(fb_helper->buffer);
66 drm_client_framebuffer_delete(fb_helper->buffer);
67 drm_client_release(&fb_helper->client);
69 drm_fb_helper_unprepare(fb_helper);
73 static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
75 struct drm_fb_helper *fb_helper = info->par;
77 if (drm_fbdev_use_shadow_fb(fb_helper))
78 return fb_deferred_io_mmap(info, vma);
79 else if (fb_helper->dev->driver->gem_prime_mmap)
80 return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma);
85 static bool drm_fbdev_use_iomem(struct fb_info *info)
87 struct drm_fb_helper *fb_helper = info->par;
88 struct drm_client_buffer *buffer = fb_helper->buffer;
90 return !drm_fbdev_use_shadow_fb(fb_helper) && buffer->map.is_iomem;
93 static ssize_t drm_fbdev_fb_read(struct fb_info *info, char __user *buf,
94 size_t count, loff_t *ppos)
98 if (drm_fbdev_use_iomem(info))
99 ret = drm_fb_helper_cfb_read(info, buf, count, ppos);
101 ret = drm_fb_helper_sys_read(info, buf, count, ppos);
106 static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf,
107 size_t count, loff_t *ppos)
111 if (drm_fbdev_use_iomem(info))
112 ret = drm_fb_helper_cfb_write(info, buf, count, ppos);
114 ret = drm_fb_helper_sys_write(info, buf, count, ppos);
/* Fill a rectangle, dispatching to the iomem or sysmem helper. */
static void drm_fbdev_fb_fillrect(struct fb_info *info,
				  const struct fb_fillrect *rect)
{
	if (drm_fbdev_use_iomem(info))
		drm_fb_helper_cfb_fillrect(info, rect);
	else
		drm_fb_helper_sys_fillrect(info, rect);
}
/* Copy a screen area, dispatching to the iomem or sysmem helper. */
static void drm_fbdev_fb_copyarea(struct fb_info *info,
				  const struct fb_copyarea *area)
{
	if (drm_fbdev_use_iomem(info))
		drm_fb_helper_cfb_copyarea(info, area);
	else
		drm_fb_helper_sys_copyarea(info, area);
}
/* Blit an image, dispatching to the iomem or sysmem helper. */
static void drm_fbdev_fb_imageblit(struct fb_info *info,
				   const struct fb_image *image)
{
	if (drm_fbdev_use_iomem(info))
		drm_fb_helper_cfb_imageblit(info, image);
	else
		drm_fb_helper_sys_imageblit(info, image);
}
146 static const struct fb_ops drm_fbdev_fb_ops = {
147 .owner = THIS_MODULE,
148 DRM_FB_HELPER_DEFAULT_OPS,
149 .fb_open = drm_fbdev_fb_open,
150 .fb_release = drm_fbdev_fb_release,
151 .fb_destroy = drm_fbdev_fb_destroy,
152 .fb_mmap = drm_fbdev_fb_mmap,
153 .fb_read = drm_fbdev_fb_read,
154 .fb_write = drm_fbdev_fb_write,
155 .fb_fillrect = drm_fbdev_fb_fillrect,
156 .fb_copyarea = drm_fbdev_fb_copyarea,
157 .fb_imageblit = drm_fbdev_fb_imageblit,
161 * This function uses the client API to create a framebuffer backed by a dumb buffer.
163 static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
164 struct drm_fb_helper_surface_size *sizes)
166 struct drm_client_dev *client = &fb_helper->client;
167 struct drm_device *dev = fb_helper->dev;
168 struct drm_client_buffer *buffer;
169 struct drm_framebuffer *fb;
170 struct fb_info *info;
172 struct iosys_map map;
175 drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
176 sizes->surface_width, sizes->surface_height,
179 format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
180 buffer = drm_client_framebuffer_create(client, sizes->surface_width,
181 sizes->surface_height, format);
183 return PTR_ERR(buffer);
185 fb_helper->buffer = buffer;
186 fb_helper->fb = buffer->fb;
189 info = drm_fb_helper_alloc_info(fb_helper);
191 return PTR_ERR(info);
193 info->fbops = &drm_fbdev_fb_ops;
194 info->screen_size = sizes->surface_height * fb->pitches[0];
195 info->fix.smem_len = info->screen_size;
196 info->flags = FBINFO_DEFAULT;
198 drm_fb_helper_fill_info(info, fb_helper, sizes);
200 if (drm_fbdev_use_shadow_fb(fb_helper)) {
201 info->screen_buffer = vzalloc(info->screen_size);
202 if (!info->screen_buffer)
204 info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
206 /* Set a default deferred I/O handler */
207 fb_helper->fbdefio.delay = HZ / 20;
208 fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
210 info->fbdefio = &fb_helper->fbdefio;
211 ret = fb_deferred_io_init(info);
215 /* buffer is mapped for HW framebuffer */
216 ret = drm_client_buffer_vmap(fb_helper->buffer, &map);
220 info->screen_base = map.vaddr_iomem;
222 info->screen_buffer = map.vaddr;
223 info->flags |= FBINFO_VIRTFB;
227 * Shamelessly leak the physical address to user-space. As
228 * page_to_phys() is undefined for I/O memory, warn in this
231 #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
232 if (fb_helper->hint_leak_smem_start && info->fix.smem_start == 0 &&
233 !drm_WARN_ON_ONCE(dev, map.is_iomem))
234 info->fix.smem_start =
235 page_to_phys(virt_to_page(info->screen_buffer));
242 static void drm_fbdev_damage_blit_real(struct drm_fb_helper *fb_helper,
243 struct drm_clip_rect *clip,
244 struct iosys_map *dst)
246 struct drm_framebuffer *fb = fb_helper->fb;
247 size_t offset = clip->y1 * fb->pitches[0];
248 size_t len = clip->x2 - clip->x1;
252 switch (drm_format_info_bpp(fb->format, 0)) {
254 offset += clip->x1 / 8;
255 len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
258 offset += clip->x1 / 4;
259 len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
262 offset += clip->x1 / 2;
263 len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
266 offset += clip->x1 * fb->format->cpp[0];
267 len *= fb->format->cpp[0];
271 src = fb_helper->info->screen_buffer + offset;
272 iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
274 for (y = clip->y1; y < clip->y2; y++) {
275 iosys_map_memcpy_to(dst, 0, src, len);
276 iosys_map_incr(dst, fb->pitches[0]);
277 src += fb->pitches[0];
281 static int drm_fbdev_damage_blit(struct drm_fb_helper *fb_helper,
282 struct drm_clip_rect *clip)
284 struct drm_client_buffer *buffer = fb_helper->buffer;
285 struct iosys_map map, dst;
289 * We have to pin the client buffer to its current location while
290 * flushing the shadow buffer. In the general case, concurrent
291 * modesetting operations could try to move the buffer and would
292 * fail. The modeset has to be serialized by acquiring the reservation
293 * object of the underlying BO here.
295 * For fbdev emulation, we only have to protect against fbdev modeset
296 * operations. Nothing else will involve the client buffer's BO. So it
297 * is sufficient to acquire struct drm_fb_helper.lock here.
299 mutex_lock(&fb_helper->lock);
301 ret = drm_client_buffer_vmap(buffer, &map);
306 drm_fbdev_damage_blit_real(fb_helper, clip, &dst);
308 drm_client_buffer_vunmap(buffer);
311 mutex_unlock(&fb_helper->lock);
316 static int drm_fbdev_fb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
318 struct drm_device *dev = helper->dev;
321 if (!drm_fbdev_use_shadow_fb(helper))
324 /* Call damage handlers only if necessary */
325 if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
328 if (helper->buffer) {
329 ret = drm_fbdev_damage_blit(helper, clip);
330 if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
334 if (helper->fb->funcs->dirty) {
335 ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
336 if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
343 static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = {
344 .fb_probe = drm_fbdev_fb_probe,
345 .fb_dirty = drm_fbdev_fb_dirty,
348 static void drm_fbdev_client_unregister(struct drm_client_dev *client)
350 struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
352 if (fb_helper->info) {
353 drm_fb_helper_unregister_info(fb_helper);
355 drm_client_release(&fb_helper->client);
356 drm_fb_helper_unprepare(fb_helper);
361 static int drm_fbdev_client_restore(struct drm_client_dev *client)
363 drm_fb_helper_lastclose(client->dev);
368 static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
370 struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
371 struct drm_device *dev = client->dev;
375 return drm_fb_helper_hotplug_event(dev->fb_helper);
377 ret = drm_fb_helper_init(dev, fb_helper);
381 if (!drm_drv_uses_atomic_modeset(dev))
382 drm_helper_disable_unused_functions(dev);
384 ret = drm_fb_helper_initial_config(fb_helper);
386 goto err_drm_fb_helper_fini;
390 err_drm_fb_helper_fini:
391 drm_fb_helper_fini(fb_helper);
393 drm_err(dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret);
397 static const struct drm_client_funcs drm_fbdev_client_funcs = {
398 .owner = THIS_MODULE,
399 .unregister = drm_fbdev_client_unregister,
400 .restore = drm_fbdev_client_restore,
401 .hotplug = drm_fbdev_client_hotplug,
405 * drm_fbdev_generic_setup() - Setup generic fbdev emulation
407 * @preferred_bpp: Preferred bits per pixel for the device.
409 * This function sets up generic fbdev emulation for drivers that supports
410 * dumb buffers with a virtual address and that can be mmap'ed.
411 * drm_fbdev_generic_setup() shall be called after the DRM driver registered
412 * the new DRM device with drm_dev_register().
414 * Restore, hotplug events and teardown are all taken care of. Drivers that do
415 * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
416 * Simple drivers might use drm_mode_config_helper_suspend().
418 * Drivers that set the dirty callback on their framebuffer will get a shadow
419 * fbdev buffer that is blitted onto the real buffer. This is done in order to
420 * make deferred I/O work with all kinds of buffers. A shadow buffer can be
421 * requested explicitly by setting struct drm_mode_config.prefer_shadow or
422 * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is
423 * required to use generic fbdev emulation with SHMEM helpers.
425 * This function is safe to call even when there are no connectors present.
426 * Setup will be retried on the next hotplug event.
428 * The fbdev is destroyed by drm_dev_unregister().
430 void drm_fbdev_generic_setup(struct drm_device *dev,
431 unsigned int preferred_bpp)
433 struct drm_fb_helper *fb_helper;
436 drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
437 drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
439 fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
442 drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fb_helper_generic_funcs);
444 ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
446 drm_err(dev, "Failed to register client: %d\n", ret);
447 goto err_drm_client_init;
450 ret = drm_fbdev_client_hotplug(&fb_helper->client);
452 drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
454 drm_client_register(&fb_helper->client);
459 drm_fb_helper_unprepare(fb_helper);
463 EXPORT_SYMBOL(drm_fbdev_generic_setup);