// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Red Hat
 *
 * based in parts on udlfb.c:
 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
 */

#include <linux/unaligned.h>

#include <drm/drm.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "udl_drv.h"

/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
#define BULK_SIZE 512

#define NR_USB_REQUEST_CHANNEL 0x12

#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
#define WRITES_IN_FLIGHT (20)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256

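/* Just above 1920x1080 (2,073,600 pixels), i.e. enough for FullHD */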
#define UDL_SKU_PIXEL_LIMIT_DEFAULT	2080000

static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout);

/*
 * Try to make sense of whatever we parse. On errors, return @end so
 * that parsing stops, but don't fail hard.
 */
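/*
 * Each key/value pair is encoded as a little-endian 16-bit key,
 * followed by an 8-bit value length and the value bytes themselves.
 */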
static const u8 *udl_parse_key_value_pair(struct udl_device *udl, const u8 *pos, const u8 *end)
{
	u16 key;
	u8 len;

	/* read key */
	if (pos >= end - 2)
		return end;
	key = get_unaligned_le16(pos);
	pos += 2;

	/* read value length */
	if (pos >= end - 1)
		return end;
	len = *pos++;

	/* read value */
	if (pos >= end - len)
		return end;
	switch (key) {
	case 0x0200: { /* maximum number of pixels */
		unsigned int sku_pixel_limit;

		if (len < sizeof(__le32))
			break;
		sku_pixel_limit = get_unaligned_le32(pos);
		if (sku_pixel_limit >= 16 * UDL_SKU_PIXEL_LIMIT_DEFAULT)
			break; /* almost 100 MiB, so probably bogus */
		udl->sku_pixel_limit = sku_pixel_limit;
		break;
	}
	default:
		break;
	}
	pos += len;

	return pos;
}

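/*
 * Read the device's vendor-specific descriptor (type 0x5f), validate
 * its 5-byte header and walk the key/value pairs it contains. Returns
 * 0 even for an absent or unrecognized descriptor, which only triggers
 * a warning; the only hard failure is a memory allocation error.
 */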
static int udl_parse_vendor_descriptor(struct udl_device *udl)
{
	struct drm_device *dev = &udl->drm;
	struct usb_device *udev = udl_to_usb_device(udl);
	bool detected = false;
	void *buf;
	int ret;
	unsigned int len;
	const u8 *desc;
	const u8 *desc_end;

	buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_get_descriptor(udev, 0x5f, /* vendor specific */
				 0, buf, MAX_VENDOR_DESCRIPTOR_SIZE);
	if (ret < 0)
		goto out;
	len = ret;

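	/* a valid descriptor is at least the 5-byte header */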
	if (len < 5)
		goto out;

	desc = buf;
	desc_end = desc + len;

	if ((desc[0] != len) ||    /* descriptor length */
	    (desc[1] != 0x5f) ||   /* vendor descriptor type */
	    (desc[2] != 0x01) ||   /* version (2 bytes) */
	    (desc[3] != 0x00) ||
	    (desc[4] != len - 2))  /* length after type */
		goto out;
	desc += 5;

	detected = true;

	while (desc < desc_end)
		desc = udl_parse_key_value_pair(udl, desc, desc_end);

out:
	if (!detected)
		drm_warn(dev, "Unrecognized vendor firmware descriptor\n");
	kfree(buf);

	return 0;
}

/*
 * Need to ensure a channel is selected before submitting URBs
 */
int udl_select_std_channel(struct udl_device *udl)
{
	static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
					 0x1C, 0x88, 0x5E, 0x15,
					 0x60, 0xFE, 0xC6, 0x97,
					 0x16, 0x3D, 0x47, 0xF2};

	void *sendbuf;
	int ret;
	struct usb_device *udev = udl_to_usb_device(udl);

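	/* usb_control_msg() requires a kmalloc'ed, DMA-able buffer */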
	sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
	if (!sendbuf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      NR_USB_REQUEST_CHANNEL,
			      (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
			      sendbuf, sizeof(set_def_chn),
			      USB_CTRL_SET_TIMEOUT);
	kfree(sendbuf);
	return ret < 0 ? ret : 0;
}

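/*
 * URB completion handler; runs in atomic context. Puts the URB back on
 * the free list and wakes up anyone waiting in udl_get_urb_locked().
 */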
void udl_urb_completion(struct urb *urb)
{
	struct urb_node *unode = urb->context;
	struct udl_device *udl = unode->dev;
	unsigned long flags;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -EPROTO ||
		    urb->status == -ESHUTDOWN)) {
			DRM_ERROR("%s - nonzero write bulk status received: %d\n",
				__func__, urb->status);
		}
	}

	urb->transfer_buffer_length = udl->urbs.size; /* reset to full buffer size */

	spin_lock_irqsave(&udl->urbs.lock, flags);
	list_add_tail(&unode->entry, &udl->urbs.list);
	udl->urbs.available++;
	spin_unlock_irqrestore(&udl->urbs.lock, flags);

	wake_up(&udl->urbs.sleep);
}

static void udl_free_urb_list(struct udl_device *udl)
{
	struct urb_node *unode;
	struct urb *urb;

	DRM_DEBUG("Waiting for completes and freeing all render urbs\n");

	/* keep waiting and freeing, until we've got 'em all */
	while (udl->urbs.count) {
		spin_lock_irq(&udl->urbs.lock);
		urb = udl_get_urb_locked(udl, MAX_SCHEDULE_TIMEOUT);
		udl->urbs.count--;
		spin_unlock_irq(&udl->urbs.lock);
		if (WARN_ON(!urb))
			break;
		unode = urb->context;
		/* Free each separately allocated piece */
		usb_free_coherent(urb->dev, udl->urbs.size,
				  urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
		kfree(unode);
	}

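	/* let any remaining waiters see that no URBs are left */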
	wake_up_all(&udl->urbs.sleep);
}

static int udl_alloc_urb_list(struct udl_device *udl, int count, size_t size)
{
	struct urb *urb;
	struct urb_node *unode;
	char *buf;
	size_t wanted_size = count * size;
	struct usb_device *udev = udl_to_usb_device(udl);

	spin_lock_init(&udl->urbs.lock);
	INIT_LIST_HEAD(&udl->urbs.list);
	init_waitqueue_head(&udl->urbs.sleep);
	udl->urbs.count = 0;
	udl->urbs.available = 0;

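	/*
	 * Allocate until wanted_size worth of buffer space is reached.
	 * If a coherent buffer of the current size cannot be allocated,
	 * halve the size (down to PAGE_SIZE), free everything allocated
	 * so far and start over.
	 */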
retry:
	udl->urbs.size = size;

	while (udl->urbs.count * size < wanted_size) {
		unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
		if (!unode)
			break;
		unode->dev = udl;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			kfree(unode);
			break;
		}
		unode->urb = urb;

		buf = usb_alloc_coherent(udev, size, GFP_KERNEL,
					 &urb->transfer_dma);
		if (!buf) {
			kfree(unode);
			usb_free_urb(urb);
			if (size > PAGE_SIZE) {
				size /= 2;
				udl_free_urb_list(udl);
				goto retry;
			}
			break;
		}

		/* urb->transfer_buffer_length set to actual before submit */
		usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 1),
				  buf, size, udl_urb_completion, unode);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		list_add_tail(&unode->entry, &udl->urbs.list);

		udl->urbs.count++;
		udl->urbs.available++;
	}

	DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);

	return udl->urbs.count;
}

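/*
 * Take a free URB off the list, waiting up to @timeout jiffies if none
 * is available. Returns NULL on timeout or once all URBs have been
 * freed. Must be called with udl->urbs.lock held.
 */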
static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout)
{
	struct urb_node *unode;

	assert_spin_locked(&udl->urbs.lock);

	/* Wait for an in-flight buffer to complete and get re-queued */
	if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
					 !udl->urbs.count ||
					 !list_empty(&udl->urbs.list),
					 udl->urbs.lock, timeout)) {
		DRM_INFO("wait for urb interrupted: available: %d\n",
			 udl->urbs.available);
		return NULL;
	}

	if (!udl->urbs.count)
		return NULL;

	unode = list_first_entry(&udl->urbs.list, struct urb_node, entry);
	list_del_init(&unode->entry);
	udl->urbs.available--;

	return unode->urb;
}

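/* wait up to one second (HZ jiffies) for a free URB */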
#define GET_URB_TIMEOUT	HZ
struct urb *udl_get_urb(struct udl_device *udl)
{
	struct urb *urb;

	spin_lock_irq(&udl->urbs.lock);
	urb = udl_get_urb_locked(udl, GET_URB_TIMEOUT);
	spin_unlock_irq(&udl->urbs.lock);
	return urb;
}

int udl_submit_urb(struct udl_device *udl, struct urb *urb, size_t len)
{
	int ret;

	if (WARN_ON(len > udl->urbs.size)) {
		ret = -EINVAL;
		goto error;
	}
	urb->transfer_buffer_length = len; /* set to actual payload len */
	ret = usb_submit_urb(urb, GFP_ATOMIC);
 error:
	if (ret) {
		udl_urb_completion(urb); /* because no one else will */
		DRM_ERROR("usb_submit_urb error %x\n", ret);
	}
	return ret;
}

/* wait until all pending URBs have been processed */
void udl_sync_pending_urbs(struct udl_device *udl)
{
	struct drm_device *dev = &udl->drm;

	spin_lock_irq(&udl->urbs.lock);
	/* 2 seconds as a sane timeout */
	if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
					 udl->urbs.available == udl->urbs.count,
					 udl->urbs.lock,
					 msecs_to_jiffies(2000)))
		drm_err(dev, "Timeout for syncing pending URBs\n");
	spin_unlock_irq(&udl->urbs.lock);
}

int udl_init(struct udl_device *udl)
{
	struct drm_device *dev = &udl->drm;
	int ret = -ENOMEM;
	struct device *dma_dev;

	DRM_DEBUG("\n");

	dma_dev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
	if (dma_dev) {
		drm_dev_set_dma_dev(dev, dma_dev);
		put_device(dma_dev);
	} else {
		drm_warn(dev, "buffer sharing not supported"); /* not an error */
	}

	/*
	 * Not all devices provide vendor descriptors with device
	 * information. Initialize to a default value taken from
	 * real-world devices; it is just enough for FullHD.
	 */
	udl->sku_pixel_limit = UDL_SKU_PIXEL_LIMIT_DEFAULT;

	ret = udl_parse_vendor_descriptor(udl);
	if (ret)
		goto err;

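	/* channel-selection failure is logged, but not fatal */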
	if (udl_select_std_channel(udl))
		DRM_ERROR("Selecting channel failed\n");

	if (!udl_alloc_urb_list(udl, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
		DRM_ERROR("udl_alloc_urb_list failed\n");
		ret = -ENOMEM;
		goto err;
	}

	DRM_DEBUG("\n");
	ret = udl_modeset_init(udl);
	if (ret)
		goto err;

	return 0;

err:
	if (udl->urbs.count)
		udl_free_urb_list(udl);
	DRM_ERROR("%d\n", ret);
	return ret;
}

int udl_drop_usb(struct udl_device *udl)
{
	udl_free_urb_list(udl);

	return 0;
}