// SPDX-License-Identifier: GPL-2.0
/*
 * USB Raw Gadget driver.
 * See Documentation/usb/raw-gadget.rst for more details.
 *
 * Copyright (c) 2020 Google, Inc.
 * Author: Andrey Konovalov <andreyknvl@gmail.com>
 */
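
/*
 * Rough sketch of the expected userspace flow (see the documentation file
 * above and include/uapi/linux/usb/raw_gadget.h for the authoritative
 * interface): open /dev/raw-gadget, issue USB_RAW_IOCTL_INIT and then
 * USB_RAW_IOCTL_RUN, loop on USB_RAW_IOCTL_EVENT_FETCH, and answer control
 * requests with the EP0_READ/EP0_WRITE/EP0_STALL ioctls; non-control
 * transfers go through EP_ENABLE followed by EP_READ/EP_WRITE.
 */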

#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/ch11.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>

#include <uapi/linux/usb/raw_gadget.h>

#define DRIVER_DESC "USB Raw Gadget"
#define DRIVER_NAME "raw-gadget"

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Andrey Konovalov");
MODULE_LICENSE("GPL");

/*----------------------------------------------------------------------*/

static DEFINE_IDA(driver_id_numbers);
#define DRIVER_DRIVER_NAME_LENGTH_MAX 32

#define RAW_EVENT_QUEUE_SIZE 16

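/*
 * Events generated in gadget callbacks (connect, control requests, etc.) are
 * stored in this queue and handed to userspace one at a time via the
 * USB_RAW_IOCTL_EVENT_FETCH ioctl.
 */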
struct raw_event_queue {
	/* See the comment in raw_event_queue_fetch() for locking details. */
	spinlock_t lock;
	struct semaphore sema;
	struct usb_raw_event *events[RAW_EVENT_QUEUE_SIZE];
	int size;
};

static void raw_event_queue_init(struct raw_event_queue *queue)
{
	spin_lock_init(&queue->lock);
	sema_init(&queue->sema, 0);
	queue->size = 0;
}

static int raw_event_queue_add(struct raw_event_queue *queue,
	enum usb_raw_event_type type, size_t length, const void *data)
{
	unsigned long flags;
	struct usb_raw_event *event;

	spin_lock_irqsave(&queue->lock, flags);
	if (queue->size >= RAW_EVENT_QUEUE_SIZE) {
		spin_unlock_irqrestore(&queue->lock, flags);
		return -ENOMEM;
	}
	event = kmalloc(sizeof(*event) + length, GFP_ATOMIC);
	if (!event) {
		spin_unlock_irqrestore(&queue->lock, flags);
		return -ENOMEM;
	}
	event->type = type;
	event->length = length;
	if (event->length)
		memcpy(&event->data[0], data, length);
	queue->events[queue->size] = event;
	queue->size++;
	up(&queue->sema);
	spin_unlock_irqrestore(&queue->lock, flags);
	return 0;
}

static struct usb_raw_event *raw_event_queue_fetch(
				struct raw_event_queue *queue)
{
	int ret;
	unsigned long flags;
	struct usb_raw_event *event;

	/*
	 * This function can be called concurrently. We first check that
	 * there's at least one event queued by decrementing the semaphore,
	 * and then take the lock to protect queue struct fields.
	 */
	ret = down_interruptible(&queue->sema);
	if (ret)
		return ERR_PTR(ret);
	spin_lock_irqsave(&queue->lock, flags);
	/*
	 * queue->size must have the same value as queue->sema counter (before
	 * the down_interruptible() call above), so this check is a fail-safe.
	 */
	if (WARN_ON(!queue->size)) {
		spin_unlock_irqrestore(&queue->lock, flags);
		return ERR_PTR(-ENODEV);
	}
	event = queue->events[0];
	queue->size--;
	memmove(&queue->events[0], &queue->events[1],
			queue->size * sizeof(queue->events[0]));
	spin_unlock_irqrestore(&queue->lock, flags);
	return event;
}

static void raw_event_queue_destroy(struct raw_event_queue *queue)
{
	int i;

	for (i = 0; i < queue->size; i++)
		kfree(queue->events[i]);
	queue->size = 0;
}

/*----------------------------------------------------------------------*/

struct raw_dev;

enum ep_state {
	STATE_EP_DISABLED,
	STATE_EP_ENABLED,
};

struct raw_ep {
	struct raw_dev *dev;
	enum ep_state state;
	struct usb_ep *ep;
	u8 addr;
	struct usb_request *req;
	bool urb_queued;
	bool disabling;
	ssize_t status;
};

enum dev_state {
	STATE_DEV_INVALID = 0,
	STATE_DEV_OPENED,
	STATE_DEV_INITIALIZED,
	STATE_DEV_REGISTERING,
	STATE_DEV_RUNNING,
	STATE_DEV_CLOSED,
	STATE_DEV_FAILED
};

struct raw_dev {
	struct kref count;
	spinlock_t lock;

	const char *udc_name;
	struct usb_gadget_driver driver;

	/* Reference to misc device: */
	struct device *dev;

	/* Make driver names unique */
	int driver_id_number;

	/* Protected by lock: */
	enum dev_state state;
	bool gadget_registered;
	struct usb_gadget *gadget;
	struct usb_request *req;
	bool ep0_in_pending;
	bool ep0_out_pending;
	bool ep0_urb_queued;
	ssize_t ep0_status;
	struct raw_ep eps[USB_RAW_EPS_NUM_MAX];
	int eps_num;

	struct completion ep0_done;
	struct raw_event_queue queue;
};

static struct raw_dev *dev_new(void)
{
	struct raw_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	/* Matches kref_put() in raw_release(). */
	kref_init(&dev->count);
	spin_lock_init(&dev->lock);
	init_completion(&dev->ep0_done);
	raw_event_queue_init(&dev->queue);
	dev->driver_id_number = -1;
	return dev;
}

static void dev_free(struct kref *kref)
{
	struct raw_dev *dev = container_of(kref, struct raw_dev, count);
	int i;

	kfree(dev->udc_name);
	kfree(dev->driver.udc_name);
	kfree(dev->driver.driver.name);
	if (dev->driver_id_number >= 0)
		ida_free(&driver_id_numbers, dev->driver_id_number);
	if (dev->req) {
		if (dev->ep0_urb_queued)
			usb_ep_dequeue(dev->gadget->ep0, dev->req);
		usb_ep_free_request(dev->gadget->ep0, dev->req);
	}
	raw_event_queue_destroy(&dev->queue);
	for (i = 0; i < dev->eps_num; i++) {
		if (dev->eps[i].state == STATE_EP_DISABLED)
			continue;
		usb_ep_disable(dev->eps[i].ep);
		usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req);
		kfree(dev->eps[i].ep->desc);
		dev->eps[i].state = STATE_EP_DISABLED;
	}
	kfree(dev);
}

/*----------------------------------------------------------------------*/

static int raw_queue_event(struct raw_dev *dev,
	enum usb_raw_event_type type, size_t length, const void *data)
{
	int ret = 0;
	unsigned long flags;

	ret = raw_event_queue_add(&dev->queue, type, length, data);
	if (ret < 0) {
		spin_lock_irqsave(&dev->lock, flags);
		dev->state = STATE_DEV_FAILED;
		spin_unlock_irqrestore(&dev->lock, flags);
	}
	return ret;
}

static void gadget_ep0_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct raw_dev *dev = req->context;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (req->status)
		dev->ep0_status = req->status;
	else
		dev->ep0_status = req->actual;
	if (dev->ep0_in_pending)
		dev->ep0_in_pending = false;
	else
		dev->ep0_out_pending = false;
	spin_unlock_irqrestore(&dev->lock, flags);

	complete(&dev->ep0_done);
}

static u8 get_ep_addr(const char *name)
{
	/* If the endpoint has fixed function (named as e.g. "ep12out-bulk"),
	 * parse the endpoint address from its name. We deliberately use
	 * deprecated simple_strtoul() function here, as the number isn't
	 * followed by '\0' nor '\n'.
	 */
	if (isdigit(name[2]))
		return simple_strtoul(&name[2], NULL, 10);
	/* Otherwise the endpoint is configurable (named as e.g. "ep-a"). */
	return USB_RAW_EP_ADDR_ANY;
}

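/*
 * Called by the UDC core once the gadget driver registered via
 * USB_RAW_IOCTL_RUN is bound to a matching UDC: cache the gadget's
 * endpoints, allocate the ep0 request and queue a CONNECT event.
 */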
static int gadget_bind(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	int ret = 0, i = 0;
	struct raw_dev *dev = container_of(driver, struct raw_dev, driver);
	struct usb_request *req;
	struct usb_ep *ep;
	unsigned long flags;

	if (strcmp(gadget->name, dev->udc_name) != 0)
		return -ENODEV;

	set_gadget_data(gadget, dev);
	req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
	if (!req) {
		dev_err(&gadget->dev, "usb_ep_alloc_request failed\n");
		set_gadget_data(gadget, NULL);
		return -ENOMEM;
	}

	spin_lock_irqsave(&dev->lock, flags);
	dev->req = req;
	dev->req->context = dev;
	dev->req->complete = gadget_ep0_complete;
	dev->gadget = gadget;
	gadget_for_each_ep(ep, dev->gadget) {
		dev->eps[i].ep = ep;
		dev->eps[i].addr = get_ep_addr(ep->name);
		dev->eps[i].state = STATE_EP_DISABLED;
		i++;
	}
	dev->eps_num = i;
	spin_unlock_irqrestore(&dev->lock, flags);

	dev_dbg(&gadget->dev, "gadget connected\n");
	ret = raw_queue_event(dev, USB_RAW_EVENT_CONNECT, 0, NULL);
	if (ret < 0) {
		dev_err(&gadget->dev, "failed to queue connect event\n");
		set_gadget_data(gadget, NULL);
		return ret;
	}

	/* Matches kref_put() in gadget_unbind(). */
	kref_get(&dev->count);
	return ret;
}

static void gadget_unbind(struct usb_gadget *gadget)
{
	struct raw_dev *dev = get_gadget_data(gadget);

	set_gadget_data(gadget, NULL);
	/* Matches kref_get() in gadget_bind(). */
	kref_put(&dev->count, dev_free);
}

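/*
 * Control requests arriving on ep0 are forwarded to userspace as
 * USB_RAW_EVENT_CONTROL events; userspace then answers with the
 * EP0_READ/EP0_WRITE or EP0_STALL ioctls.
 */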
static int gadget_setup(struct usb_gadget *gadget,
			const struct usb_ctrlrequest *ctrl)
{
	int ret = 0;
	struct raw_dev *dev = get_gadget_data(gadget);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_err(&gadget->dev, "ignoring, device is not running\n");
		ret = -ENODEV;
		goto out_unlock;
	}
	if (dev->ep0_in_pending || dev->ep0_out_pending) {
		dev_dbg(&gadget->dev, "stalling, request already pending\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if ((ctrl->bRequestType & USB_DIR_IN) && ctrl->wLength)
		dev->ep0_in_pending = true;
	else
		dev->ep0_out_pending = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = raw_queue_event(dev, USB_RAW_EVENT_CONTROL, sizeof(*ctrl), ctrl);
	if (ret < 0)
		dev_err(&gadget->dev, "failed to queue control event\n");
	goto out;

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
out:
	if (ret == 0 && ctrl->wLength == 0) {
		/*
		 * Return USB_GADGET_DELAYED_STATUS as a workaround to stop
		 * some UDC drivers (e.g. dwc3) from automatically proceeding
		 * with the status stage for 0-length transfers.
		 * Should be removed once all UDC drivers are fixed to always
		 * delay the status stage until a response is queued to EP0.
		 */
		return USB_GADGET_DELAYED_STATUS;
	}
	return ret;
}

static void gadget_disconnect(struct usb_gadget *gadget)
{
	struct raw_dev *dev = get_gadget_data(gadget);
	int ret;

	dev_dbg(&gadget->dev, "gadget disconnected\n");
	ret = raw_queue_event(dev, USB_RAW_EVENT_DISCONNECT, 0, NULL);
	if (ret < 0)
		dev_err(&gadget->dev, "failed to queue disconnect event\n");
}
static void gadget_suspend(struct usb_gadget *gadget)
{
	struct raw_dev *dev = get_gadget_data(gadget);
	int ret;

	dev_dbg(&gadget->dev, "gadget suspended\n");
	ret = raw_queue_event(dev, USB_RAW_EVENT_SUSPEND, 0, NULL);
	if (ret < 0)
		dev_err(&gadget->dev, "failed to queue suspend event\n");
}
static void gadget_resume(struct usb_gadget *gadget)
{
	struct raw_dev *dev = get_gadget_data(gadget);
	int ret;

	dev_dbg(&gadget->dev, "gadget resumed\n");
	ret = raw_queue_event(dev, USB_RAW_EVENT_RESUME, 0, NULL);
	if (ret < 0)
		dev_err(&gadget->dev, "failed to queue resume event\n");
}
static void gadget_reset(struct usb_gadget *gadget)
{
	struct raw_dev *dev = get_gadget_data(gadget);
	int ret;

	dev_dbg(&gadget->dev, "gadget reset\n");
	ret = raw_queue_event(dev, USB_RAW_EVENT_RESET, 0, NULL);
	if (ret < 0)
		dev_err(&gadget->dev, "failed to queue reset event\n");
}

/*----------------------------------------------------------------------*/

static struct miscdevice raw_misc_device;

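/*
 * Each open() of /dev/raw-gadget creates an independent raw_dev instance;
 * the device is torn down (and the gadget driver unregistered, if needed)
 * when the file is closed.
 */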
static int raw_open(struct inode *inode, struct file *fd)
{
	struct raw_dev *dev;

	/* Nonblocking I/O is not supported yet. */
	if (fd->f_flags & O_NONBLOCK)
		return -EINVAL;

	dev = dev_new();
	if (!dev)
		return -ENOMEM;
	fd->private_data = dev;
	dev->state = STATE_DEV_OPENED;
	dev->dev = raw_misc_device.this_device;
	return 0;
}

static int raw_release(struct inode *inode, struct file *fd)
{
	int ret = 0;
	struct raw_dev *dev = fd->private_data;
	unsigned long flags;
	bool unregister = false;

	spin_lock_irqsave(&dev->lock, flags);
	dev->state = STATE_DEV_CLOSED;
	if (!dev->gadget) {
		spin_unlock_irqrestore(&dev->lock, flags);
		goto out_put;
	}
	if (dev->gadget_registered)
		unregister = true;
	dev->gadget_registered = false;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (unregister) {
		ret = usb_gadget_unregister_driver(&dev->driver);
		if (ret != 0)
			dev_err(dev->dev,
				"usb_gadget_unregister_driver() failed with %d\n",
				ret);
		/* Matches kref_get() in raw_ioctl_run(). */
		kref_put(&dev->count, dev_free);
	}

out_put:
	/* Matches dev_new() in raw_open(). */
	kref_put(&dev->count, dev_free);
	return ret;
}

/*----------------------------------------------------------------------*/

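/*
 * First ioctl issued by userspace: copy in struct usb_raw_init (UDC driver
 * name, UDC device name and maximum speed) and prepare, but do not yet
 * register, the gadget driver.
 */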
static int raw_ioctl_init(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	int driver_id_number;
	struct usb_raw_init arg;
	char *udc_driver_name;
	char *udc_device_name;
	char *driver_driver_name;
	unsigned long flags;

	if (copy_from_user(&arg, (void __user *)value, sizeof(arg)))
		return -EFAULT;

	switch (arg.speed) {
	case USB_SPEED_UNKNOWN:
		arg.speed = USB_SPEED_HIGH;
		break;
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
	case USB_SPEED_SUPER:
		break;
	default:
		return -EINVAL;
	}

	driver_id_number = ida_alloc(&driver_id_numbers, GFP_KERNEL);
	if (driver_id_number < 0)
		return driver_id_number;

	driver_driver_name = kmalloc(DRIVER_DRIVER_NAME_LENGTH_MAX, GFP_KERNEL);
	if (!driver_driver_name) {
		ret = -ENOMEM;
		goto out_free_driver_id_number;
	}
	snprintf(driver_driver_name, DRIVER_DRIVER_NAME_LENGTH_MAX,
			DRIVER_NAME ".%d", driver_id_number);

	udc_driver_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
	if (!udc_driver_name) {
		ret = -ENOMEM;
		goto out_free_driver_driver_name;
	}
	ret = strscpy(udc_driver_name, &arg.driver_name[0],
			UDC_NAME_LENGTH_MAX);
	if (ret < 0)
		goto out_free_udc_driver_name;
	ret = 0;

	udc_device_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
	if (!udc_device_name) {
		ret = -ENOMEM;
		goto out_free_udc_driver_name;
	}
	ret = strscpy(udc_device_name, &arg.device_name[0],
			UDC_NAME_LENGTH_MAX);
	if (ret < 0)
		goto out_free_udc_device_name;
	ret = 0;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_OPENED) {
		dev_dbg(dev->dev, "fail, device is not opened\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	dev->udc_name = udc_driver_name;

	dev->driver.function = DRIVER_DESC;
	dev->driver.max_speed = arg.speed;
	dev->driver.setup = gadget_setup;
	dev->driver.disconnect = gadget_disconnect;
	dev->driver.bind = gadget_bind;
	dev->driver.unbind = gadget_unbind;
	dev->driver.suspend = gadget_suspend;
	dev->driver.resume = gadget_resume;
	dev->driver.reset = gadget_reset;
	dev->driver.driver.name = driver_driver_name;
	dev->driver.udc_name = udc_device_name;
	dev->driver.match_existing_only = 1;
	dev->driver_id_number = driver_id_number;

	dev->state = STATE_DEV_INITIALIZED;
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
out_free_udc_device_name:
	kfree(udc_device_name);
out_free_udc_driver_name:
	kfree(udc_driver_name);
out_free_driver_driver_name:
	kfree(driver_driver_name);
out_free_driver_id_number:
	ida_free(&driver_id_numbers, driver_id_number);
	return ret;
}

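/*
 * Second step: register the gadget driver prepared in raw_ioctl_init().
 * Once the UDC binds it, gadget_bind() runs and a USB_RAW_EVENT_CONNECT
 * event becomes available for fetching.
 */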
static int raw_ioctl_run(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	unsigned long flags;

	if (value)
		return -EINVAL;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_INITIALIZED) {
		dev_dbg(dev->dev, "fail, device is not initialized\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	dev->state = STATE_DEV_REGISTERING;
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_gadget_register_driver(&dev->driver);

	spin_lock_irqsave(&dev->lock, flags);
	if (ret) {
		dev_err(dev->dev,
			"fail, usb_gadget_register_driver returned %d\n", ret);
		dev->state = STATE_DEV_FAILED;
		goto out_unlock;
	}
	dev->gadget_registered = true;
	dev->state = STATE_DEV_RUNNING;
	/* Matches kref_put() in raw_release(). */
	kref_get(&dev->count);

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

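/*
 * Blocks until an event is available (or the wait is interrupted) and copies
 * the event header plus at most arg.length bytes of event data to userspace.
 */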
static int raw_ioctl_event_fetch(struct raw_dev *dev, unsigned long value)
{
	struct usb_raw_event arg;
	unsigned long flags;
	struct usb_raw_event *event;
	uint32_t length;

	if (copy_from_user(&arg, (void __user *)value, sizeof(arg)))
		return -EFAULT;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		spin_unlock_irqrestore(&dev->lock, flags);
		return -EINVAL;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		spin_unlock_irqrestore(&dev->lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	event = raw_event_queue_fetch(&dev->queue);
	if (PTR_ERR(event) == -EINTR) {
		dev_dbg(&dev->gadget->dev, "event fetching interrupted\n");
		return -EINTR;
	}
	if (IS_ERR(event)) {
		dev_err(&dev->gadget->dev, "failed to fetch event\n");
		spin_lock_irqsave(&dev->lock, flags);
		dev->state = STATE_DEV_FAILED;
		spin_unlock_irqrestore(&dev->lock, flags);
		return -ENODEV;
	}
	length = min(arg.length, event->length);
	if (copy_to_user((void __user *)value, event, sizeof(*event) + length)) {
		kfree(event);
		return -EFAULT;
	}

	kfree(event);
	return 0;
}

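/*
 * Copies the usb_raw_ep_io header from userspace, validates it and returns a
 * kernel buffer for the payload (filled from userspace for writes, merely
 * allocated for reads). The transfer length is capped at PAGE_SIZE.
 */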
static void *raw_alloc_io_data(struct usb_raw_ep_io *io, void __user *ptr,
				bool get_from_user)
{
	void *data;

	if (copy_from_user(io, ptr, sizeof(*io)))
		return ERR_PTR(-EFAULT);
	if (io->ep >= USB_RAW_EPS_NUM_MAX)
		return ERR_PTR(-EINVAL);
	if (!usb_raw_io_flags_valid(io->flags))
		return ERR_PTR(-EINVAL);
	if (io->length > PAGE_SIZE)
		return ERR_PTR(-EINVAL);
	if (get_from_user)
		data = memdup_user(ptr + sizeof(*io), io->length);
	else {
		data = kmalloc(io->length, GFP_KERNEL);
		if (!data)
			data = ERR_PTR(-ENOMEM);
	}
	return data;
}

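/*
 * Performs a synchronous transfer on ep0 in response to a pending control
 * request: queue the request and sleep until the completion handler fires or
 * the wait is interrupted, in which case the request is dequeued.
 */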
static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
				void *data, bool in)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->ep0_urb_queued) {
		dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if ((in && !dev->ep0_in_pending) ||
			(!in && !dev->ep0_out_pending)) {
		dev_dbg(&dev->gadget->dev, "fail, wrong direction\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (WARN_ON(in && dev->ep0_out_pending)) {
		ret = -ENODEV;
		dev->state = STATE_DEV_FAILED;
		goto out_unlock;
	}
	if (WARN_ON(!in && dev->ep0_in_pending)) {
		ret = -ENODEV;
		dev->state = STATE_DEV_FAILED;
		goto out_unlock;
	}

	dev->req->buf = data;
	dev->req->length = io->length;
	dev->req->zero = usb_raw_io_flags_zero(io->flags);
	dev->ep0_urb_queued = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(dev->gadget->ep0, dev->req, GFP_KERNEL);
	if (ret) {
		dev_err(&dev->gadget->dev,
				"fail, usb_ep_queue returned %d\n", ret);
		spin_lock_irqsave(&dev->lock, flags);
		goto out_queue_failed;
	}

	ret = wait_for_completion_interruptible(&dev->ep0_done);
	if (ret) {
		dev_dbg(&dev->gadget->dev, "wait interrupted\n");
		usb_ep_dequeue(dev->gadget->ep0, dev->req);
		wait_for_completion(&dev->ep0_done);
		spin_lock_irqsave(&dev->lock, flags);
		if (dev->ep0_status == -ECONNRESET)
			dev->ep0_status = -EINTR;
		goto out_interrupted;
	}

	spin_lock_irqsave(&dev->lock, flags);

out_interrupted:
	ret = dev->ep0_status;
out_queue_failed:
	dev->ep0_urb_queued = false;
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_ep0_write(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	void *data;
	struct usb_raw_ep_io io;

	data = raw_alloc_io_data(&io, (void __user *)value, true);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep0_io(dev, &io, data, true);
	kfree(data);
	return ret;
}

static int raw_ioctl_ep0_read(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	void *data;
	struct usb_raw_ep_io io;
	unsigned int length;

	data = raw_alloc_io_data(&io, (void __user *)value, false);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep0_io(dev, &io, data, false);
	if (ret < 0)
		goto free;

	length = min(io.length, (unsigned int)ret);
	if (copy_to_user((void __user *)(value + sizeof(io)), data, length))
		ret = -EFAULT;
	else
		ret = length;
free:
	kfree(data);
	return ret;
}

static int raw_ioctl_ep0_stall(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	unsigned long flags;

	if (value)
		return -EINVAL;
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->ep0_urb_queued) {
		dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (!dev->ep0_in_pending && !dev->ep0_out_pending) {
		dev_dbg(&dev->gadget->dev, "fail, no request pending\n");
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = usb_ep_set_halt(dev->gadget->ep0);
	if (ret < 0)
		dev_err(&dev->gadget->dev,
			"fail, usb_ep_set_halt returned %d\n", ret);

	if (dev->ep0_in_pending)
		dev->ep0_in_pending = false;
	else
		dev->ep0_out_pending = false;

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

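/*
 * Matches the endpoint descriptor supplied by userspace against the UDC's
 * endpoints, enables the first suitable one that is still free and returns
 * its index for use with the EP_READ/EP_WRITE/EP_DISABLE ioctls.
 */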
static int raw_ioctl_ep_enable(struct raw_dev *dev, unsigned long value)
{
	int ret = 0, i;
	unsigned long flags;
	struct usb_endpoint_descriptor *desc;
	struct raw_ep *ep;
	bool ep_props_matched = false;

	desc = memdup_user((void __user *)value, sizeof(*desc));
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/*
	 * Endpoints with a maxpacket length of 0 can cause crashes in UDC
	 * drivers.
	 */
	if (usb_endpoint_maxp(desc) == 0) {
		dev_dbg(dev->dev, "fail, bad endpoint maxpacket\n");
		kfree(desc);
		return -EINVAL;
	}

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_free;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_free;
	}

	for (i = 0; i < dev->eps_num; i++) {
		ep = &dev->eps[i];
		if (ep->addr != usb_endpoint_num(desc) &&
				ep->addr != USB_RAW_EP_ADDR_ANY)
			continue;
		if (!usb_gadget_ep_match_desc(dev->gadget, ep->ep, desc, NULL))
			continue;
		ep_props_matched = true;
		if (ep->state != STATE_EP_DISABLED)
			continue;
		ep->ep->desc = desc;
		ret = usb_ep_enable(ep->ep);
		if (ret < 0) {
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_enable returned %d\n", ret);
			goto out_free;
		}
		ep->req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC);
		if (!ep->req) {
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_alloc_request failed\n");
			usb_ep_disable(ep->ep);
			ret = -ENOMEM;
			goto out_free;
		}
		ep->state = STATE_EP_ENABLED;
		ep->ep->driver_data = ep;
		ret = i;
		goto out_unlock;
	}

	if (!ep_props_matched) {
		dev_dbg(&dev->gadget->dev, "fail, bad endpoint descriptor\n");
		ret = -EINVAL;
	} else {
		dev_dbg(&dev->gadget->dev, "fail, no endpoints available\n");
		ret = -EBUSY;
	}

out_free:
	kfree(desc);
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value)
{
	int ret = 0, i = value;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (i < 0 || i >= dev->eps_num) {
		dev_dbg(dev->dev, "fail, invalid endpoint\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->eps[i].state == STATE_EP_DISABLED) {
		dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].disabling) {
		dev_dbg(&dev->gadget->dev,
				"fail, disable already in progress\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].urb_queued) {
		dev_dbg(&dev->gadget->dev,
				"fail, waiting for urb completion\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	dev->eps[i].disabling = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	usb_ep_disable(dev->eps[i].ep);

	spin_lock_irqsave(&dev->lock, flags);
	usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req);
	kfree(dev->eps[i].ep->desc);
	dev->eps[i].state = STATE_EP_DISABLED;
	dev->eps[i].disabling = false;

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_ep_set_clear_halt_wedge(struct raw_dev *dev,
		unsigned long value, bool set, bool halt)
{
	int ret = 0, i = value;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (i < 0 || i >= dev->eps_num) {
		dev_dbg(dev->dev, "fail, invalid endpoint\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->eps[i].state == STATE_EP_DISABLED) {
		dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].disabling) {
		dev_dbg(&dev->gadget->dev,
				"fail, disable is in progress\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].urb_queued) {
		dev_dbg(&dev->gadget->dev,
				"fail, waiting for urb completion\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (usb_endpoint_xfer_isoc(dev->eps[i].ep->desc)) {
		dev_dbg(&dev->gadget->dev,
				"fail, can't halt/wedge ISO endpoint\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	if (set && halt) {
		ret = usb_ep_set_halt(dev->eps[i].ep);
		if (ret < 0)
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_set_halt returned %d\n", ret);
	} else if (!set && halt) {
		ret = usb_ep_clear_halt(dev->eps[i].ep);
		if (ret < 0)
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_clear_halt returned %d\n", ret);
	} else if (set && !halt) {
		ret = usb_ep_set_wedge(dev->eps[i].ep);
		if (ret < 0)
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_set_wedge returned %d\n", ret);
	}

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static void gadget_ep_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct raw_ep *r_ep = (struct raw_ep *)ep->driver_data;
	struct raw_dev *dev = r_ep->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (req->status)
		r_ep->status = req->status;
	else
		r_ep->status = req->actual;
	spin_unlock_irqrestore(&dev->lock, flags);

	complete((struct completion *)req->context);
}

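/*
 * Same synchronous transfer scheme as raw_process_ep0_io(), but for a
 * non-control endpoint previously enabled with USB_RAW_IOCTL_EP_ENABLE.
 */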
static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
				void *data, bool in)
{
	int ret = 0;
	unsigned long flags;
	struct raw_ep *ep;
	DECLARE_COMPLETION_ONSTACK(done);

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (io->ep >= dev->eps_num) {
		dev_dbg(&dev->gadget->dev, "fail, invalid endpoint\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	ep = &dev->eps[io->ep];
	if (ep->state != STATE_EP_ENABLED) {
		dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (ep->disabling) {
		dev_dbg(&dev->gadget->dev,
				"fail, endpoint is already being disabled\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (ep->urb_queued) {
		dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (in != usb_endpoint_dir_in(ep->ep->desc)) {
		dev_dbg(&dev->gadget->dev, "fail, wrong direction\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	ep->dev = dev;
	ep->req->context = &done;
	ep->req->complete = gadget_ep_complete;
	ep->req->buf = data;
	ep->req->length = io->length;
	ep->req->zero = usb_raw_io_flags_zero(io->flags);
	ep->urb_queued = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(ep->ep, ep->req, GFP_KERNEL);
	if (ret) {
		dev_err(&dev->gadget->dev,
				"fail, usb_ep_queue returned %d\n", ret);
		spin_lock_irqsave(&dev->lock, flags);
		goto out_queue_failed;
	}

	ret = wait_for_completion_interruptible(&done);
	if (ret) {
		dev_dbg(&dev->gadget->dev, "wait interrupted\n");
		usb_ep_dequeue(ep->ep, ep->req);
		wait_for_completion(&done);
		spin_lock_irqsave(&dev->lock, flags);
		if (ep->status == -ECONNRESET)
			ep->status = -EINTR;
		goto out_interrupted;
	}

	spin_lock_irqsave(&dev->lock, flags);

out_interrupted:
	ret = ep->status;
out_queue_failed:
	ep->urb_queued = false;
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_ep_write(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	char *data;
	struct usb_raw_ep_io io;

	data = raw_alloc_io_data(&io, (void __user *)value, true);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep_io(dev, &io, data, true);
	kfree(data);
	return ret;
}

static int raw_ioctl_ep_read(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	char *data;
	struct usb_raw_ep_io io;
	unsigned int length;

	data = raw_alloc_io_data(&io, (void __user *)value, false);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep_io(dev, &io, data, false);
	if (ret < 0)
		goto free;

	length = min(io.length, (unsigned int)ret);
	if (copy_to_user((void __user *)(value + sizeof(io)), data, length))
		ret = -EFAULT;
	else
		ret = length;
free:
	kfree(data);
	return ret;
}

static int raw_ioctl_configure(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	unsigned long flags;

	if (value)
		return -EINVAL;
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	usb_gadget_set_state(dev->gadget, USB_STATE_CONFIGURED);

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_vbus_draw(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	usb_gadget_vbus_draw(dev->gadget, 2 * value);

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static void fill_ep_caps(struct usb_ep_caps *caps,
				struct usb_raw_ep_caps *raw_caps)
{
	raw_caps->type_control = caps->type_control;
	raw_caps->type_iso = caps->type_iso;
	raw_caps->type_bulk = caps->type_bulk;
	raw_caps->type_int = caps->type_int;
	raw_caps->dir_in = caps->dir_in;
	raw_caps->dir_out = caps->dir_out;
}

static void fill_ep_limits(struct usb_ep *ep, struct usb_raw_ep_limits *limits)
{
	limits->maxpacket_limit = ep->maxpacket_limit;
	limits->max_streams = ep->max_streams;
}

static int raw_ioctl_eps_info(struct raw_dev *dev, unsigned long value)
{
	int ret = 0, i;
	unsigned long flags;
	struct usb_raw_eps_info *info;
	struct raw_ep *ep;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		ret = -ENOMEM;
		goto out;
	}

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		spin_unlock_irqrestore(&dev->lock, flags);
		goto out_free;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		spin_unlock_irqrestore(&dev->lock, flags);
		goto out_free;
	}

	for (i = 0; i < dev->eps_num; i++) {
		ep = &dev->eps[i];
		strscpy(&info->eps[i].name[0], ep->ep->name,
				USB_RAW_EP_NAME_MAX);
		info->eps[i].addr = ep->addr;
		fill_ep_caps(&ep->ep->caps, &info->eps[i].caps);
		fill_ep_limits(ep->ep, &info->eps[i].limits);
	}
	ret = dev->eps_num;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (copy_to_user((void __user *)value, info, sizeof(*info)))
		ret = -EFAULT;

out_free:
	kfree(info);
out:
	return ret;
}

static long raw_ioctl(struct file *fd, unsigned int cmd, unsigned long value)
{
	struct raw_dev *dev = fd->private_data;
	int ret = 0;

	if (!dev)
		return -EBUSY;

	switch (cmd) {
	case USB_RAW_IOCTL_INIT:
		ret = raw_ioctl_init(dev, value);
		break;
	case USB_RAW_IOCTL_RUN:
		ret = raw_ioctl_run(dev, value);
		break;
	case USB_RAW_IOCTL_EVENT_FETCH:
		ret = raw_ioctl_event_fetch(dev, value);
		break;
	case USB_RAW_IOCTL_EP0_WRITE:
		ret = raw_ioctl_ep0_write(dev, value);
		break;
	case USB_RAW_IOCTL_EP0_READ:
		ret = raw_ioctl_ep0_read(dev, value);
		break;
	case USB_RAW_IOCTL_EP_ENABLE:
		ret = raw_ioctl_ep_enable(dev, value);
		break;
	case USB_RAW_IOCTL_EP_DISABLE:
		ret = raw_ioctl_ep_disable(dev, value);
		break;
	case USB_RAW_IOCTL_EP_WRITE:
		ret = raw_ioctl_ep_write(dev, value);
		break;
	case USB_RAW_IOCTL_EP_READ:
		ret = raw_ioctl_ep_read(dev, value);
		break;
	case USB_RAW_IOCTL_CONFIGURE:
		ret = raw_ioctl_configure(dev, value);
		break;
	case USB_RAW_IOCTL_VBUS_DRAW:
		ret = raw_ioctl_vbus_draw(dev, value);
		break;
	case USB_RAW_IOCTL_EPS_INFO:
		ret = raw_ioctl_eps_info(dev, value);
		break;
	case USB_RAW_IOCTL_EP0_STALL:
		ret = raw_ioctl_ep0_stall(dev, value);
		break;
	case USB_RAW_IOCTL_EP_SET_HALT:
		ret = raw_ioctl_ep_set_clear_halt_wedge(
					dev, value, true, true);
		break;
	case USB_RAW_IOCTL_EP_CLEAR_HALT:
		ret = raw_ioctl_ep_set_clear_halt_wedge(
					dev, value, false, true);
		break;
	case USB_RAW_IOCTL_EP_SET_WEDGE:
		ret = raw_ioctl_ep_set_clear_halt_wedge(
					dev, value, true, false);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*----------------------------------------------------------------------*/

static const struct file_operations raw_fops = {
	.open = raw_open,
	.unlocked_ioctl = raw_ioctl,
	.compat_ioctl = raw_ioctl,
	.release = raw_release,
};

static struct miscdevice raw_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = DRIVER_NAME,
	.fops = &raw_fops,
};

module_misc_device(raw_misc_device);