Linux 6.9-rc1
[linux-2.6-microblaze.git] / drivers/gpu/drm/udl/udl_main.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Red Hat
 *
 * based in parts on udlfb.c:
 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
 */

#include <drm/drm.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "udl_drv.h"

/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
#define BULK_SIZE 512

#define NR_USB_REQUEST_CHANNEL 0x12

#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
#define WRITES_IN_FLIGHT (20)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256

static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout);

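/*
 * Fetch and parse the device's vendor-specific USB descriptor (type 0x5f).
 * As the checks below show, the descriptor starts with a 5-byte fixed header
 * (total length, type, 16-bit version, length after type) followed by
 * key/length-prefixed records; the only key handled here is 0x0200, a 32-bit
 * pixel-area limit stored in udl->sku_pixel_limit. An unrecognized
 * descriptor is logged but the driver still loads; only a failed allocation
 * makes this return false.
 */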
static int udl_parse_vendor_descriptor(struct udl_device *udl)
{
        struct usb_device *udev = udl_to_usb_device(udl);
        char *desc;
        char *buf;
        char *desc_end;

        u8 total_len = 0;

        buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
        if (!buf)
                return false;
        desc = buf;

        total_len = usb_get_descriptor(udev, 0x5f, /* vendor specific */
                                       0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
        if (total_len > 5) {
                DRM_INFO("vendor descriptor length:%x data:%11ph\n",
                         total_len, desc);

                if ((desc[0] != total_len) || /* descriptor length */
                    (desc[1] != 0x5f) ||   /* vendor descriptor type */
                    (desc[2] != 0x01) ||   /* version (2 bytes) */
                    (desc[3] != 0x00) ||
                    (desc[4] != total_len - 2)) /* length after type */
                        goto unrecognized;

                desc_end = desc + total_len;
                desc += 5; /* the fixed header we've already parsed */

                while (desc < desc_end) {
                        u8 length;
                        u16 key;

                        key = le16_to_cpu(*((u16 *) desc));
                        desc += sizeof(u16);
                        length = *desc;
                        desc++;

                        switch (key) {
                        case 0x0200: { /* max_area */
                                u32 max_area;
                                max_area = le32_to_cpu(*((u32 *)desc));
                                DRM_DEBUG("DL chip limited to %d pixel modes\n",
                                          max_area);
                                udl->sku_pixel_limit = max_area;
                                break;
                        }
                        default:
                                break;
                        }
                        desc += length;
                }
        }

        goto success;

unrecognized:
        /* allow udlfb to load for now even if firmware unrecognized */
        DRM_ERROR("Unrecognized vendor firmware descriptor\n");

success:
        kfree(buf);
        return true;
}

/*
 * Make sure the standard graphics channel is selected before any URBs are
 * submitted. The 16-byte sequence below is a fixed key inherited from udlfb
 * that the device expects with vendor request NR_USB_REQUEST_CHANNEL.
 */
int udl_select_std_channel(struct udl_device *udl)
{
        static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
                                         0x1C, 0x88, 0x5E, 0x15,
                                         0x60, 0xFE, 0xC6, 0x97,
                                         0x16, 0x3D, 0x47, 0xF2};

        void *sendbuf;
        int ret;
        struct usb_device *udev = udl_to_usb_device(udl);

        sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
        if (!sendbuf)
                return -ENOMEM;

        ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                              NR_USB_REQUEST_CHANNEL,
                              (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
                              sendbuf, sizeof(set_def_chn),
                              USB_CTRL_SET_TIMEOUT);
        kfree(sendbuf);
        return ret < 0 ? ret : 0;
}

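/*
 * Completion handler for the bulk URBs used to stream render data. Logs
 * unexpected errors (unlink/shutdown statuses are normal during teardown),
 * restores the URB's full buffer length, returns the URB to the free list
 * and wakes any waiter in udl_get_urb_locked() or udl_sync_pending_urbs().
 */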
void udl_urb_completion(struct urb *urb)
{
        struct urb_node *unode = urb->context;
        struct udl_device *udl = unode->dev;
        unsigned long flags;

        /* sync/async unlink faults aren't errors */
        if (urb->status) {
                if (!(urb->status == -ENOENT ||
                    urb->status == -ECONNRESET ||
                    urb->status == -EPROTO ||
                    urb->status == -ESHUTDOWN)) {
                        DRM_ERROR("%s - nonzero write bulk status received: %d\n",
                                  __func__, urb->status);
                }
        }

        urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */

        spin_lock_irqsave(&udl->urbs.lock, flags);
        list_add_tail(&unode->entry, &udl->urbs.list);
        udl->urbs.available++;
        spin_unlock_irqrestore(&udl->urbs.lock, flags);

        wake_up(&udl->urbs.sleep);
}

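/*
 * Tear down the URB pool: wait for each in-flight URB to come back via
 * udl_urb_completion(), then free its coherent buffer, the URB itself and
 * its urb_node. Finally wake anyone still sleeping on the now-empty pool.
 */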
static void udl_free_urb_list(struct drm_device *dev)
{
        struct udl_device *udl = to_udl(dev);
        struct urb_node *unode;
        struct urb *urb;

        DRM_DEBUG("Waiting for completes and freeing all render urbs\n");

        /* keep waiting and freeing, until we've got 'em all */
        while (udl->urbs.count) {
                spin_lock_irq(&udl->urbs.lock);
                urb = udl_get_urb_locked(udl, MAX_SCHEDULE_TIMEOUT);
                udl->urbs.count--;
                spin_unlock_irq(&udl->urbs.lock);
                if (WARN_ON(!urb))
                        break;
                unode = urb->context;
                /* Free each separately allocated piece */
                usb_free_coherent(urb->dev, udl->urbs.size,
                                  urb->transfer_buffer, urb->transfer_dma);
                usb_free_urb(urb);
                kfree(unode);
        }

        wake_up_all(&udl->urbs.sleep);
}

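/*
 * Build the pool of bulk URBs used for damage transfers, aiming for roughly
 * count * size bytes of coherent buffer space. If an allocation fails the
 * buffer size is halved (down to PAGE_SIZE) and the whole pool is rebuilt.
 * Returns the number of URBs actually allocated, 0 on complete failure.
 */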
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
        struct udl_device *udl = to_udl(dev);
        struct urb *urb;
        struct urb_node *unode;
        char *buf;
        size_t wanted_size = count * size;
        struct usb_device *udev = udl_to_usb_device(udl);

        spin_lock_init(&udl->urbs.lock);
        INIT_LIST_HEAD(&udl->urbs.list);
        init_waitqueue_head(&udl->urbs.sleep);
        udl->urbs.count = 0;
        udl->urbs.available = 0;

retry:
        udl->urbs.size = size;

        while (udl->urbs.count * size < wanted_size) {
                unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
                if (!unode)
                        break;
                unode->dev = udl;

                urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!urb) {
                        kfree(unode);
                        break;
                }
                unode->urb = urb;

                buf = usb_alloc_coherent(udev, size, GFP_KERNEL,
                                         &urb->transfer_dma);
                if (!buf) {
                        kfree(unode);
                        usb_free_urb(urb);
                        if (size > PAGE_SIZE) {
                                size /= 2;
                                udl_free_urb_list(dev);
                                goto retry;
                        }
                        break;
                }

                /* urb->transfer_buffer_length set to actual before submit */
                usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 1),
                                  buf, size, udl_urb_completion, unode);
                urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

                list_add_tail(&unode->entry, &udl->urbs.list);

                udl->urbs.count++;
                udl->urbs.available++;
        }

        DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);

        return udl->urbs.count;
}

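/*
 * Take a free URB from the pool, sleeping (urbs.lock is dropped while
 * waiting) until one is returned by udl_urb_completion() or the timeout
 * expires. Returns NULL on timeout or when the pool has been torn down
 * (urbs.count == 0).
 */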
static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout)
{
        struct urb_node *unode;

        assert_spin_locked(&udl->urbs.lock);

        /* Wait for an in-flight buffer to complete and get re-queued */
        if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
                                         !udl->urbs.count ||
                                         !list_empty(&udl->urbs.list),
                                         udl->urbs.lock, timeout)) {
                DRM_INFO("wait for urb interrupted: available: %d\n",
                         udl->urbs.available);
                return NULL;
        }

        if (!udl->urbs.count)
                return NULL;

        unode = list_first_entry(&udl->urbs.list, struct urb_node, entry);
        list_del_init(&unode->entry);
        udl->urbs.available--;

        return unode->urb;
}

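/*
 * Convenience wrapper around udl_get_urb_locked() that takes the pool lock
 * and waits up to GET_URB_TIMEOUT (one second) for a free URB.
 */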
#define GET_URB_TIMEOUT HZ
struct urb *udl_get_urb(struct drm_device *dev)
{
        struct udl_device *udl = to_udl(dev);
        struct urb *urb;

        spin_lock_irq(&udl->urbs.lock);
        urb = udl_get_urb_locked(udl, GET_URB_TIMEOUT);
        spin_unlock_irq(&udl->urbs.lock);
        return urb;
}

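/*
 * Submit a filled URB carrying len bytes of payload. On any failure the URB
 * is handed straight back to the pool through udl_urb_completion(), since no
 * completion callback will run for it.
 */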
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
{
        struct udl_device *udl = to_udl(dev);
        int ret;

        if (WARN_ON(len > udl->urbs.size)) {
                ret = -EINVAL;
                goto error;
        }
        urb->transfer_buffer_length = len; /* set to actual payload len */
        ret = usb_submit_urb(urb, GFP_ATOMIC);
 error:
        if (ret) {
                udl_urb_completion(urb); /* because no one else will */
                DRM_ERROR("usb_submit_urb error %d\n", ret);
        }
        return ret;
}

/* wait until all pending URBs have been processed */
void udl_sync_pending_urbs(struct drm_device *dev)
{
        struct udl_device *udl = to_udl(dev);

        spin_lock_irq(&udl->urbs.lock);
        /* 2 seconds as a sane timeout */
        if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
                                         udl->urbs.available == udl->urbs.count,
                                         udl->urbs.lock,
                                         msecs_to_jiffies(2000)))
                drm_err(dev, "Timeout for syncing pending URBs\n");
        spin_unlock_irq(&udl->urbs.lock);
}

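/*
 * One-time device setup: grab the DMA-capable device for buffer sharing
 * (optional), parse the vendor descriptor, select the standard channel,
 * allocate the URB pool and initialize modesetting. On error the URB pool
 * and the dmadev reference are released again.
 */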
int udl_init(struct udl_device *udl)
{
        struct drm_device *dev = &udl->drm;
        int ret = -ENOMEM;

        DRM_DEBUG("\n");

        udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
        if (!udl->dmadev)
                drm_warn(dev, "buffer sharing not supported"); /* not an error */

        mutex_init(&udl->gem_lock);

        if (!udl_parse_vendor_descriptor(udl)) {
                ret = -ENODEV;
                DRM_ERROR("firmware not recognized. Assume incompatible device\n");
                goto err;
        }

        if (udl_select_std_channel(udl))
                DRM_ERROR("Selecting channel failed\n");

        if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
                DRM_ERROR("udl_alloc_urb_list failed\n");
                goto err;
        }

        DRM_DEBUG("\n");
        ret = udl_modeset_init(dev);
        if (ret)
                goto err;

        drm_kms_helper_poll_init(dev);

        return 0;

err:
        if (udl->urbs.count)
                udl_free_urb_list(dev);
        put_device(udl->dmadev);
        DRM_ERROR("%d\n", ret);
        return ret;
}

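/*
 * USB-side teardown counterpart to udl_init(): release the URB pool and drop
 * the reference on the DMA device.
 */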
int udl_drop_usb(struct drm_device *dev)
{
        struct udl_device *udl = to_udl(dev);

        udl_free_urb_list(dev);
        put_device(udl->dmadev);
        udl->dmadev = NULL;

        return 0;
}