1 #include <linux/kernel.h>
2 #include <linux/errno.h>
3 #include <linux/init.h>
4 #include <linux/slab.h>
6 #include <linux/module.h>
7 #include <linux/moduleparam.h>
8 #include <linux/scatterlist.h>
9 #include <linux/mutex.h>
11 #include <linux/usb.h>
14 /*-------------------------------------------------------------------------*/
16 /* FIXME make these public somewhere; usbdevfs.h? */
17 struct usbtest_param {
19 unsigned test_num; /* 0..(TEST_CASES-1) */
26 struct timeval duration;
28 #define USBTEST_REQUEST _IOWR('U', 100, struct usbtest_param)
30 /*-------------------------------------------------------------------------*/
32 #define GENERIC /* let probe() bind using module params */
34 /* Some devices that can be used for testing will have "real" drivers.
35 * Entries for those need to be enabled here by hand, after disabling
38 //#define IBOT2 /* grab iBOT2 webcams */
39 //#define KEYSPAN_19Qi /* grab un-renumerated serial adapter */
41 /*-------------------------------------------------------------------------*/
45 u8 ep_in; /* bulk/intr source */
46 u8 ep_out; /* bulk/intr sink */
49 unsigned iso:1; /* try iso in/out */
53 /* this is accessed only through usbfs ioctl calls.
54 * one ioctl to issue a test ... one lock per device.
55 * tests create other threads if they need them.
56 * urbs and buffers are allocated dynamically,
57 * and data generated deterministically.
60 struct usb_interface *intf;
61 struct usbtest_info *info;
66 struct usb_endpoint_descriptor *iso_in, *iso_out;
73 static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
75 return interface_to_usbdev(test->intf);
/* set up all urbs so they can be used with either bulk or interrupt */
#define INTERRUPT_RATE 1 /* msec/transfer */

/* logging helpers bound to the test device's USB interface */
#define ERROR(tdev, fmt, args...) \
	dev_err(&(tdev)->intf->dev , fmt , ## args)
#define WARNING(tdev, fmt, args...) \
	dev_warn(&(tdev)->intf->dev , fmt , ## args)

/* fill byte used to detect corruption around/inside test buffers */
#define GUARD_BYTE 0xA5
88 /*-------------------------------------------------------------------------*/
/* Pick the endpoints used for testing: scan each altsetting for a
 * bulk IN/OUT pair (and, when present, iso IN/OUT endpoints), select
 * that altsetting, and cache the matching pipes in @dev.
 * NOTE(review): this chunk is missing several lines of the function
 * (return type, loop-variable declarations, error paths, braces);
 * comments describe only what is visible.
 */
get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
	struct usb_host_interface *alt;
	struct usb_host_endpoint *in, *out;
	struct usb_host_endpoint *iso_in, *iso_out;
	struct usb_device *udev;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		/* restart the candidate search for each altsetting */
		iso_in = iso_out = NULL;
		alt = intf->altsetting + tmp;

		/* take the first altsetting with in-bulk + out-bulk;
		 * ignore other endpoints and altsettings.
		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;

			e = alt->endpoint + ep;
			/* classify by transfer type, then by direction */
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_BULK:
			case USB_ENDPOINT_XFER_ISOC:
			if (usb_endpoint_dir_in(&e->desc)) {
			if (usb_endpoint_dir_in(&e->desc)) {
		/* stop once we have a bulk pair or any iso endpoint */
		if ((in && out) || iso_in || iso_out)
	udev = testdev_to_usbdev(dev);
	/* activate the chosen altsetting unless it's already the default */
	if (alt->desc.bAlternateSetting != 0) {
		tmp = usb_set_interface(udev,
			alt->desc.bInterfaceNumber,
			alt->desc.bAlternateSetting);
	/* record bulk pipes; endpoint number masked from the address */
	dev->in_pipe = usb_rcvbulkpipe(udev,
		in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->out_pipe = usb_sndbulkpipe(udev,
		out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	/* iso endpoints are optional; cache descriptor + pipe when found */
	dev->iso_in = &iso_in->desc;
	dev->in_iso_pipe = usb_rcvisocpipe(udev,
		iso_in->desc.bEndpointAddress
			& USB_ENDPOINT_NUMBER_MASK);
	dev->iso_out = &iso_out->desc;
	dev->out_iso_pipe = usb_sndisocpipe(udev,
		iso_out->desc.bEndpointAddress
			& USB_ENDPOINT_NUMBER_MASK);
177 /*-------------------------------------------------------------------------*/
179 /* Support for testing basic non-queued I/O streams.
181 * These just package urbs as requests that can be easily canceled.
182 * Each urb's data buffer is dynamically allocated; callers can fill
183 * them with non-zero test data (or test for it) when appropriate.
186 static void simple_callback(struct urb *urb)
188 complete(urb->context);
/* Allocate an urb plus a transfer buffer of @bytes (+ @offset slack for
 * unaligned-transfer testing).  The buffer is DMA-coherent when
 * URB_NO_TRANSFER_DMA_MAP is requested, kmalloc'ed otherwise.
 * NOTE(review): parameter list and error-path lines are partly missing
 * from this view; documented from the visible body only.
 */
static struct urb *usbtest_alloc_urb(
	struct usb_device *udev,
	unsigned transfer_flags,
	urb = usb_alloc_urb(0, GFP_KERNEL);
	usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, simple_callback, NULL);
	/* high speed expresses the polling interval in microframes */
	urb->interval = (udev->speed == USB_SPEED_HIGH)
			? (INTERRUPT_RATE << 3)
	urb->transfer_flags = transfer_flags;
	/* for reads, a short transfer is treated as an error */
	if (usb_pipein(pipe))
		urb->transfer_flags |= URB_SHORT_NOT_OK;
	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
		urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
			GFP_KERNEL, &urb->transfer_dma);
		urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);
	if (!urb->transfer_buffer) {
	/* To test unaligned transfers add an offset and fill the
	   unused memory with a guard value */
		memset(urb->transfer_buffer, GUARD_BYTE, offset);
		urb->transfer_buffer += offset;
		if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
			urb->transfer_dma += offset;
	/* For inbound transfers use guard byte so that test fails if
	   data not correctly copied */
	memset(urb->transfer_buffer,
		usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
/* Convenience wrapper: allocate an urb with a DMA-mapped, aligned
 * (offset 0) transfer buffer.
 */
static struct urb *simple_alloc_urb(
	struct usb_device *udev,
	return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0);
/* "pattern" module parameter selects the i/o data pattern used by the
 * simple tests (0 == all zeroes; a mod-63 pattern is also supported —
 * see simple_fill_buf/simple_check_buf).
 */
static unsigned pattern;
static unsigned mod_pattern;
module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)");
/* Fill an OUT urb's buffer with the selected test pattern; the visible
 * branch writes the repeating mod-63 byte sequence.
 */
static inline void simple_fill_buf(struct urb *urb)
	u8 *buf = urb->transfer_buffer;
	unsigned len = urb->transfer_buffer_length;
	for (i = 0; i < len; i++)
		*buf++ = (u8) (i % 63);
271 static inline unsigned long buffer_offset(void *buf)
273 return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
/* Verify that the guard bytes written before an unaligned buffer are
 * still GUARD_BYTE, i.e. the transfer did not underrun the buffer.
 */
static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
	u8 *buf = urb->transfer_buffer;
	u8 *guard = buf - buffer_offset(buf);
	/* walk from the allocation start up to the (offset) data start */
	for (i = 0; guard < buf; i++, guard++) {
		if (*guard != GUARD_BYTE) {
			ERROR(tdev, "guard byte[%d] %d (not %d)\n",
				i, *guard, GUARD_BYTE);
/* Check an IN urb's received data: guard bytes first, then byte-by-byte
 * comparison against the expected pattern.  Returns nonzero on mismatch.
 */
static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
	u8 *buf = urb->transfer_buffer;
	unsigned len = urb->actual_length;
	int ret = check_guard_bytes(tdev, urb);
	for (i = 0; i < len; i++, buf++) {
		/* all-zeroes has no synchronization issues */
		/* mod63 stays in sync with short-terminated transfers,
		 * or otherwise when host and gadget agree on how large
		 * each usb transfer request should be. resync is done
		 * with set_interface or set_config.
		/* always fail unsupported patterns */
		if (*buf == expected)
		ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
/* Free an urb allocated by usbtest_alloc_urb(): back out any unaligned
 * offset to recover the true allocation start, then release the buffer
 * via the matching allocator (coherent vs kmalloc).
 */
static void simple_free_urb(struct urb *urb)
	unsigned long offset = buffer_offset(urb->transfer_buffer);
	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
			urb->transfer_buffer_length + offset,
			urb->transfer_buffer - offset,
			urb->transfer_dma - offset);
		kfree(urb->transfer_buffer - offset);
/* Run @iterations synchronous transfers on @urb, optionally varying the
 * transfer length each pass, and compare the final status against
 * @expected.  OUT transfers are pattern-filled; IN transfers are
 * pattern-checked after completion.
 * NOTE(review): several parameters (iterations, vary, expected, label)
 * are declared on lines not visible in this chunk.
 */
static int simple_io(
	struct usbtest_dev *tdev,
	struct usb_device *udev = urb->dev;
	int max = urb->transfer_buffer_length;
	struct completion completion;
	urb->context = &completion;
	while (retval == 0 && iterations-- > 0) {
		init_completion(&completion);
		if (usb_pipeout(urb->pipe)) {
			simple_fill_buf(urb);
			urb->transfer_flags |= URB_ZERO_PACKET;
		retval = usb_submit_urb(urb, GFP_KERNEL);

		/* NOTE: no timeouts; can't be broken out of by interrupt */
		wait_for_completion(&completion);
		retval = urb->status;
		if (retval == 0 && usb_pipein(urb->pipe))
			retval = simple_check_buf(tdev, urb);
			int len = urb->transfer_buffer_length;
			/* clamp the next length to the buffer size */
			len = (vary < max) ? vary : max;
			urb->transfer_buffer_length = len;
	/* FIXME if endpoint halted, clear halt (and log) */
	/* restore the original length for the caller's reuse */
	urb->transfer_buffer_length = max;
	if (expected != retval)
			"%s failed, iterations left %d, status %d (not %d)\n",
			label, iterations, retval, expected);
399 /*-------------------------------------------------------------------------*/
401 /* We use scatterlist primitives to test queued I/O.
402 * Yes, this also tests the scatterlist primitives.
/* Free every buffer attached to a scatterlist (skipping unpopulated
 * entries) — companion to alloc_sglist().
 */
static void free_sglist(struct scatterlist *sg, int nents)
	for (i = 0; i < nents; i++) {
		/* entries without a page were never allocated */
		if (!sg_page(&sg[i]))
		kfree(sg_virt(&sg[i]));
/* Allocate a scatterlist of @nents zeroed buffers (sized up to @max,
 * optionally varied per entry by @vary) and fill them with the test
 * pattern.  Caller frees with free_sglist().
 */
static struct scatterlist *
alloc_sglist(int nents, int max, int vary)
	struct scatterlist *sg;
	sg = kmalloc_array(nents, sizeof *sg, GFP_KERNEL);
	sg_init_table(sg, nents);
	for (i = 0; i < nents; i++) {
		buf = kzalloc(size, GFP_KERNEL);
		/* kmalloc pages are always physically contiguous! */
		sg_set_buf(&sg[i], buf, size);
		/* mod-63 pattern, matching simple_fill_buf() */
		for (j = 0; j < size; j++)
			*buf++ = (u8) (j % 63);
		/* clamp the next entry's size to the buffer maximum */
		size = (vary < max) ? vary : max;
/* Run @iterations queued scatter-gather transfers on @pipe using the
 * usb_sg_* request API; reports the first failing status.
 * NOTE(review): the pipe/nents/iterations parameters and the
 * usb_sg_wait() call are on lines not visible in this chunk.
 */
static int perform_sglist(
	struct usbtest_dev *tdev,
	struct usb_sg_request *req,
	struct scatterlist *sg,
	struct usb_device *udev = testdev_to_usbdev(tdev);
	while (retval == 0 && iterations-- > 0) {
		retval = usb_sg_init(req, udev, pipe,
				(udev->speed == USB_SPEED_HIGH)
					? (INTERRUPT_RATE << 3)
				sg, nents, 0, GFP_KERNEL);
		retval = req->status;

		/* FIXME check resulting data pattern */

		/* FIXME if endpoint halted, clear halt (and log) */
	/* FIXME for unlink or fault handling tests, don't report
	 * failure if retval is as we expected ...
		ERROR(tdev, "perform_sglist failed, "
				"iterations left %d, status %d\n",
505 /*-------------------------------------------------------------------------*/
507 /* unqueued control message testing
509 * there's a nice set of device functional requirements in chapter 9 of the
510 * usb 2.0 spec, which we can apply to ANY device, even ones that don't use
511 * special test firmware.
513 * we know the device is configured (or suspended) by the time it's visible
514 * through usbfs. we can't change that, so we won't test enumeration (which
515 * worked 'well enough' to get here, this time), power management (ditto),
516 * or remote wakeup (which needs human interaction).
/* when set (the default), relax checks that many shipping devices fail
 * even though the spec requires them — see is_good_config() and
 * ch9_postconfig() for the gated checks
 */
static unsigned realworld = 1;
module_param(realworld, uint, 0);
MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
/* Issue a GET_INTERFACE control request [USB 2.0 §9.4.4] for this test
 * interface; the one-byte reply lands in dev->buf.
 */
static int get_altsetting(struct usbtest_dev *dev)
	struct usb_interface *iface = dev->intf;
	struct usb_device *udev = interface_to_usbdev(iface);
	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
			0, iface->altsetting[0].desc.bInterfaceNumber,
			dev->buf, 1, USB_CTRL_GET_TIMEOUT);
/* Select @alternate on this test interface via usb_set_interface(),
 * after range-checking the (byte-sized) altsetting number.
 */
static int set_altsetting(struct usbtest_dev *dev, int alternate)
	struct usb_interface *iface = dev->intf;
	struct usb_device *udev;
	/* bAlternateSetting is a single byte on the wire */
	if (alternate < 0 || alternate >= 256)
	udev = interface_to_usbdev(iface);
	return usb_set_interface(udev,
			iface->altsetting[0].desc.bInterfaceNumber,
/* Sanity-check a config descriptor previously read into tdev->buf:
 * plausible length, type, attribute bits, and a wTotalLength consistent
 * with the number of bytes actually read.
 */
static int is_good_config(struct usbtest_dev *tdev, int len)
	struct usb_config_descriptor *config;
	if (len < sizeof *config)
	config = (struct usb_config_descriptor *) tdev->buf;
	switch (config->bDescriptorType) {
	case USB_DT_OTHER_SPEED_CONFIG:
		if (config->bLength != 9) {
			ERROR(tdev, "bogus config descriptor length\n");
		/* this bit 'must be 1' but often isn't */
		if (!realworld && !(config->bmAttributes & 0x80)) {
			ERROR(tdev, "high bit of config attributes not set\n");
		if (config->bmAttributes & 0x1f) {	/* reserved == 0 */
			ERROR(tdev, "reserved config bits set\n");
	if (le16_to_cpu(config->wTotalLength) == len)	/* read it all */
	if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE)	/* max partial read */
	ERROR(tdev, "bogus config descriptor read size\n");
595 /* sanity test for standard requests working with usb_control_mesg() and some
596 * of the utility functions which use it.
598 * this doesn't test how endpoint halts behave or data toggles get set, since
599 * we won't do I/O to bulk/interrupt endpoints here (which is how to change
600 * halt or toggle). toggle testing is impractical without support from hcds.
602 * this avoids failing devices linux would normally work with, by not testing
603 * config/altsetting operations for devices that only support their defaults.
604 * such devices rarely support those needless operations.
606 * NOTE that since this is a sanity test, it's not examining boundary cases
607 * to see if usbcore, hcd, and device all behave right. such testing would
608 * involve varied read sizes and other operation sequences.
/* Exercise the USB 2.0 chapter 9 standard requests that any configured
 * device must answer: get/set interface, get configuration, device and
 * config descriptors, device qualifier (for USB 2.0 devices), and the
 * device/interface status requests.  Returns 0 on success, a negative
 * errno (or -EDOM for bad-but-nonnegative replies) on failure.
 * NOTE(review): error-branch and loop-closing lines are missing from
 * this chunk; comments describe only the visible code.
 */
static int ch9_postconfig(struct usbtest_dev *dev)
	struct usb_interface *iface = dev->intf;
	struct usb_device *udev = interface_to_usbdev(iface);

	/* [9.2.3] if there's more than one altsetting, we need to be able to
	 * set and get each one. mostly trusts the descriptors from usbcore.
	for (i = 0; i < iface->num_altsetting; i++) {

		/* 9.2.3 constrains the range here */
		alt = iface->altsetting[i].desc.bAlternateSetting;
		if (alt < 0 || alt >= iface->num_altsetting) {
				"invalid alt [%d].bAltSetting = %d\n",

		/* [real world] get/set unimplemented if there's only one */
		if (realworld && iface->num_altsetting == 1)

		/* [9.4.10] set_interface */
		retval = set_altsetting(dev, alt);
			dev_err(&iface->dev, "can't set_interface = %d, %d\n",

		/* [9.4.4] get_interface always works */
		retval = get_altsetting(dev);
			dev_err(&iface->dev, "get alt should be %d, was %d\n",
			return (retval < 0) ? retval : -EDOM;

	/* [real world] get_config unimplemented if there's only one */
	if (!realworld || udev->descriptor.bNumConfigurations != 1) {
		int expected = udev->actconfig->desc.bConfigurationValue;

		/* [9.4.2] get_configuration always works
		 * ... although some cheap devices (like one TI Hub I've got)
		 * won't return config descriptors except before set_config.
		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				USB_REQ_GET_CONFIGURATION,
				USB_DIR_IN | USB_RECIP_DEVICE,
				0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
		if (retval != 1 || dev->buf[0] != expected) {
			dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
				retval, dev->buf[0], expected);
			return (retval < 0) ? retval : -EDOM;

	/* there's always [9.4.3] a device descriptor [9.6.1] */
	retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
			dev->buf, sizeof udev->descriptor);
	if (retval != sizeof udev->descriptor) {
		dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
		return (retval < 0) ? retval : -EDOM;

	/* there's always [9.4.3] at least one config descriptor [9.6.3] */
	for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
		retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
				dev->buf, TBUF_SIZE);
		if (!is_good_config(dev, retval)) {
				"config [%d] descriptor --> %d\n",
			return (retval < 0) ? retval : -EDOM;

		/* FIXME cross-checking udev->config[i] to make sure usbcore
		 * parsed it right (etc) would be good testing paranoia

	/* and sometimes [9.2.6.6] speed dependent descriptors */
	if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
		struct usb_qualifier_descriptor *d = NULL;

		/* device qualifier [9.6.2] */
		retval = usb_get_descriptor(udev,
				USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
				sizeof(struct usb_qualifier_descriptor));
		if (retval == -EPIPE) {
			/* a high-speed device must not stall this request */
			if (udev->speed == USB_SPEED_HIGH) {
					"hs dev qualifier --> %d\n",
				return (retval < 0) ? retval : -EDOM;
			/* usb2.0 but not high-speed capable; fine */
		} else if (retval != sizeof(struct usb_qualifier_descriptor)) {
			dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
			return (retval < 0) ? retval : -EDOM;
			d = (struct usb_qualifier_descriptor *) dev->buf;

		/* might not have [9.6.2] any other-speed configs [9.6.4] */
			unsigned max = d->bNumConfigurations;
			for (i = 0; i < max; i++) {
				retval = usb_get_descriptor(udev,
					USB_DT_OTHER_SPEED_CONFIG, i,
					dev->buf, TBUF_SIZE);
				if (!is_good_config(dev, retval)) {
						"other speed config --> %d\n",
					return (retval < 0) ? retval : -EDOM;

	/* FIXME fetch strings from at least the device descriptor */

	/* [9.4.5] get_status always works */
	retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
		dev_err(&iface->dev, "get dev status --> %d\n", retval);
		return (retval < 0) ? retval : -EDOM;

	/* FIXME configuration.bmAttributes says if we could try to set/clear
	 * the device's remote wakeup feature ... if we can, test that here
	retval = usb_get_status(udev, USB_RECIP_INTERFACE,
			iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
		dev_err(&iface->dev, "get interface status --> %d\n", retval);
		return (retval < 0) ? retval : -EDOM;
	/* FIXME get status for each endpoint in the interface */
756 /*-------------------------------------------------------------------------*/
758 /* use ch9 requests to test whether:
759 * (a) queues work for control, keeping N subtests queued and
760 * active (auto-resubmit) for M loops through the queue.
761 * (b) protocol stalls (control-only) will autorecover.
762 * it's not like bulk/intr; no halt clearing.
763 * (c) short control reads are reported and handled.
764 * (d) queues are always processed in-order
769 struct usbtest_dev *dev;
770 struct completion complete;
775 struct usbtest_param *param;
779 #define NUM_SUBCASES 15 /* how many test subcases here? */
782 struct usb_ctrlrequest setup;
/* Completion handler for the queued-control test (test_ctrl_queue):
 * verifies FIFO completion order, matches each subcase's status against
 * its expected result, unlinks the rest of the queue on an unexpected
 * error, and resubmits until ctx->count completions have been seen.
 * Runs in completion context; ctx->lock serializes against the
 * submitter.  NOTE(review): some branch/brace lines are not visible in
 * this chunk.
 */
static void ctrl_complete(struct urb *urb)
	struct ctrl_ctx *ctx = urb->context;
	struct usb_ctrlrequest *reqp;
	struct subcase *subcase;
	int status = urb->status;

	/* recover the subcase from the embedded setup packet */
	reqp = (struct usb_ctrlrequest *)urb->setup_packet;
	subcase = container_of(reqp, struct subcase, setup);

	spin_lock(&ctx->lock);

	/* queue must transfer and complete in fifo order, unless
	 * usb_unlink_urb() is used to unlink something not at the
	 * physical queue head (not tested).
	if (subcase->number > 0) {
		if ((subcase->number - ctx->last) != 1) {
				"subcase %d completed out of order, last %d\n",
				subcase->number, ctx->last);
			ctx->last = subcase->number;
	ctx->last = subcase->number;

	/* succeed or fault in only one way? */
	if (status == subcase->expected)

	/* async unlink for cleanup? */
	else if (status != -ECONNRESET) {

		/* some faults are allowed, not required */
		if (subcase->expected > 0 && (
			  ((status == -subcase->expected	/* happened */
			   || status == 0))))			/* didn't */
		/* sometimes more than one fault is allowed */
		else if (subcase->number == 12 && status == -EPIPE)
			ERROR(ctx->dev, "subtest %d error, status %d\n",
					subcase->number, status);

	/* unexpected status codes mean errors; ideally, in hardware */
		if (ctx->status == 0) {
			ctx->status = status;
			ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
					"%d left, subcase %d, len %d/%d\n",
					reqp->bRequestType, reqp->bRequest,
					status, ctx->count, subcase->number,
					urb->transfer_buffer_length);

			/* FIXME this "unlink everything" exit route should
			 * be a separate test case.

			/* unlink whatever's still pending */
			for (i = 1; i < ctx->param->sglen; i++) {
				struct urb *u = ctx->urb[
					(i + subcase->number)
					% ctx->param->sglen];

				if (u == urb || !u->dev)
				/* drop the lock across the unlink call */
				spin_unlock(&ctx->lock);
				status = usb_unlink_urb(u);
				spin_lock(&ctx->lock);
					ERROR(ctx->dev, "urb unlink --> %d\n",
		status = ctx->status;

	/* resubmit if we need to, else mark this as done */
	if ((status == 0) && (ctx->pending < ctx->count)) {
		status = usb_submit_urb(urb, GFP_ATOMIC);
				"can't resubmit ctrl %02x.%02x, err %d\n",
				reqp->bRequestType, reqp->bRequest, status);

	/* signal completion when nothing's queued */
	if (ctx->pending == 0)
		complete(&ctx->complete);
	spin_unlock(&ctx->lock);
/* Queue param->sglen control urbs, cycling through NUM_SUBCASES
 * chapter-9 requests (some deliberately stalling or short-reading),
 * and let ctrl_complete() keep them resubmitted for
 * sglen * iterations total completions.  Returns the first error seen,
 * else 0.  NOTE(review): return type, several declarations, `break`s
 * and closing braces are on lines not visible in this chunk.
 */
test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
	struct usb_device *udev = testdev_to_usbdev(dev);
	struct ctrl_ctx context;

	/* guard against multiplication overflow in the count below */
	if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)

	spin_lock_init(&context.lock);
	init_completion(&context.complete);
	context.count = param->sglen * param->iterations;
	context.status = -ENOMEM;
	context.param = param;

	/* allocate and init the urbs we'll queue.
	 * as with bulk/intr sglists, sglen is the queue depth; it also
	 * controls which subtests run (more tests than sglen) or rerun.
	urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
	for (i = 0; i < param->sglen; i++) {
		int pipe = usb_rcvctrlpipe(udev, 0);
		struct usb_ctrlrequest req;
		struct subcase *reqp;

		/* sign of this variable means:
		 *  -: tested code must return this (negative) error code
		 *  +: tested code may return this (negative too) error code

		/* requests here are mostly expected to succeed on any
		 * device, but some are chosen to trigger protocol stalls
		memset(&req, 0, sizeof req);
		req.bRequest = USB_REQ_GET_DESCRIPTOR;
		req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
		switch (i % NUM_SUBCASES) {
		case 0:		/* get device descriptor */
			req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
			len = sizeof(struct usb_device_descriptor);
		case 1:		/* get first config descriptor (only) */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			len = sizeof(struct usb_config_descriptor);
		case 2:		/* get altsetting (OFTEN STALLS) */
			req.bRequest = USB_REQ_GET_INTERFACE;
			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
			/* index = 0 means first interface */
		case 3:		/* get interface status */
			req.bRequest = USB_REQ_GET_STATUS;
			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
		case 4:		/* get device status */
			req.bRequest = USB_REQ_GET_STATUS;
			req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
		case 5:		/* get device qualifier (MAY STALL) */
			req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8);
			len = sizeof(struct usb_qualifier_descriptor);
			if (udev->speed != USB_SPEED_HIGH)
		case 6:		/* get first config descriptor, plus interface */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			len = sizeof(struct usb_config_descriptor);
			len += sizeof(struct usb_interface_descriptor);
		case 7:		/* get interface descriptor (ALWAYS STALLS) */
			req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8);
			len = sizeof(struct usb_interface_descriptor);
		/* NOTE: two consecutive stalls in the queue here.
		 * that tests fault recovery a bit more aggressively. */
		case 8:		/* clear endpoint halt (MAY STALL) */
			req.bRequest = USB_REQ_CLEAR_FEATURE;
			req.bRequestType = USB_RECIP_ENDPOINT;
			/* wValue 0 == ep halt */
			/* wIndex 0 == ep0 (shouldn't halt!) */
			pipe = usb_sndctrlpipe(udev, 0);
		case 9:		/* get endpoint status */
			req.bRequest = USB_REQ_GET_STATUS;
			req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
		case 10:	/* trigger short read (EREMOTEIO) */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			expected = -EREMOTEIO;
		/* NOTE: two consecutive _different_ faults in the queue. */
		case 11:	/* get endpoint descriptor (ALWAYS STALLS) */
			req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
			len = sizeof(struct usb_interface_descriptor);
		/* NOTE: sometimes even a third fault in the queue! */
		case 12:	/* get string 0 descriptor (MAY STALL) */
			req.wValue = cpu_to_le16(USB_DT_STRING << 8);
			/* string == 0, for language IDs */
			len = sizeof(struct usb_interface_descriptor);
			/* may succeed when > 4 languages */
			expected = EREMOTEIO;	/* or EPIPE, if no strings */
		case 13:	/* short read, resembling case 10 */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			/* last data packet "should" be DATA1, not DATA0 */
			len = 1024 - udev->descriptor.bMaxPacketSize0;
			expected = -EREMOTEIO;
		case 14:	/* short read; try to fill the last packet */
			req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
			/* device descriptor size == 18 bytes */
			len = udev->descriptor.bMaxPacketSize0;
			if (udev->speed == USB_SPEED_SUPER)
			expected = -EREMOTEIO;
			ERROR(dev, "bogus number of ctrl queue testcases!\n");
			context.status = -EINVAL;
		req.wLength = cpu_to_le16(len);
		urb[i] = u = simple_alloc_urb(udev, pipe, len);
		reqp = kmalloc(sizeof *reqp, GFP_KERNEL);
		reqp->number = i % NUM_SUBCASES;
		reqp->expected = expected;
		/* the setup packet lives inside the subcase record */
		u->setup_packet = (char *) &reqp->setup;
		u->context = &context;
		u->complete = ctrl_complete;

	/* queue the urbs */
	spin_lock_irq(&context.lock);
	for (i = 0; i < param->sglen; i++) {
		context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
		if (context.status != 0) {
			ERROR(dev, "can't submit urb[%d], status %d\n",
			context.count = context.pending;
	spin_unlock_irq(&context.lock);

	/* FIXME set timer and time out; provide a disconnect hook */

	/* wait for the last one to complete */
	if (context.pending > 0)
		wait_for_completion(&context.complete);
	for (i = 0; i < param->sglen; i++) {
		kfree(urb[i]->setup_packet);
		simple_free_urb(urb[i]);
	return context.status;
1107 /*-------------------------------------------------------------------------*/
/* Completion handler for the unlink tests: keep the endpoint busy by
 * resubmitting until the urb is unlinked; once resubmission stops,
 * publish the status and wake the waiter.
 */
static void unlink1_callback(struct urb *urb)
	int status = urb->status;

	/* we "know" -EPIPE (stall) never happens */
		status = usb_submit_urb(urb, GFP_ATOMIC);
		urb->status = status;
		complete(urb->context);
/* Submit one urb of @size on @pipe, then unlink it after a variable
 * delay (sync or async per @async), retrying while the urb is busy
 * completing/resubmitting.  Success means the unlink produced the
 * status appropriate to the chosen path (-ECONNRESET for async;
 * -ENOENT/-EPERM handled in the sync return below).
 * NOTE(review): several branch lines are missing from this chunk.
 */
static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
	struct completion completion;

	init_completion(&completion);
	urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size);
	urb->context = &completion;
	urb->complete = unlink1_callback;

	/* keep the endpoint busy. there are lots of hc/hcd-internal
	 * states, and testing should get to all of them over time.
	 *
	 * FIXME want additional tests for when endpoint is STALLing
	 * due to errors, or is just NAKing requests.
	retval = usb_submit_urb(urb, GFP_KERNEL);
		dev_err(&dev->intf->dev, "submit fail %d\n", retval);

	/* unlinking that should always work. variable delay tests more
	 * hcd states and code paths, even with little other system load.
	msleep(jiffies % (2 * INTERRUPT_RATE));
		while (!completion_done(&completion)) {
			retval = usb_unlink_urb(urb);
				/* we can't unlink urbs while they're completing
				 * or if they've completed, and we haven't
				 * resubmitted. "normal" drivers would prevent
				 * resubmission, but since we're testing unlink
				ERROR(dev, "unlink retry\n");
				dev_err(&dev->intf->dev,
					"unlink fail %d\n", retval);
	wait_for_completion(&completion);
	retval = urb->status;
	simple_free_urb(urb);

	/* async unlink must have been reported as -ECONNRESET */
		return (retval == -ECONNRESET) ? 0 : retval - 1000;
		return (retval == -ENOENT || retval == -EPERM) ?
/* Run the single-urb unlink test over both the async and sync paths. */
static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
	/* test sync and async paths */
	retval = unlink1(dev, pipe, len, 1);
		retval = unlink1(dev, pipe, len, 0);
1203 /*-------------------------------------------------------------------------*/
1206 struct completion complete;
/* Completion handler for the queued-unlink test: urbs num-4 and num-2
 * are the two that were unlinked, so they must complete -ECONNRESET;
 * any other outcome is recorded as the test status.  The pending
 * counter gates the final complete().
 */
static void unlink_queued_callback(struct urb *urb)
	int status = urb->status;
	struct queued_ctx *ctx = urb->context;

	if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
		if (status == -ECONNRESET)
		/* What error should we report if the URB completed normally? */
		ctx->status = status;
	if (atomic_dec_and_test(&ctx->pending))
		complete(&ctx->complete);
/* Queue @num identical bulk urbs sharing one coherent buffer, unlink
 * urbs num-4 and num-2 mid-flight, and check (via the callback) that
 * exactly those report -ECONNRESET.  Returns the recorded status.
 * NOTE(review): the size parameter, error labels and some braces are on
 * lines not visible in this chunk.
 */
static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
	struct queued_ctx ctx;
	struct usb_device *udev = testdev_to_usbdev(dev);
	int retval = -ENOMEM;

	init_completion(&ctx.complete);
	atomic_set(&ctx.pending, 1);	/* One more than the actual value */
	buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
	memset(buf, 0, size);

	/* Allocate and init the urbs we'll queue */
	ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
	for (i = 0; i < num; i++) {
		ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
		usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
				unlink_queued_callback, &ctx);
		/* all urbs share the one pre-mapped buffer */
		ctx.urbs[i]->transfer_dma = buf_dma;
		ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;

	/* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
	for (i = 0; i < num; i++) {
		atomic_inc(&ctx.pending);
		retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
			dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
			atomic_dec(&ctx.pending);
			ctx.status = retval;
		usb_unlink_urb(ctx.urbs[num - 4]);
		usb_unlink_urb(ctx.urbs[num - 2]);
			usb_unlink_urb(ctx.urbs[i]);

	if (atomic_dec_and_test(&ctx.pending))		/* The extra count */
		complete(&ctx.complete);
	wait_for_completion(&ctx.complete);
	retval = ctx.status;
	for (i = 0; i < num; i++)
		usb_free_urb(ctx.urbs[i]);
	usb_free_coherent(udev, size, buf, buf_dma);
1301 /*-------------------------------------------------------------------------*/
/* Check that endpoint @ep neither reports a halt via GET_STATUS nor
 * fails a simple transfer on @urb.
 */
static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
	/* shouldn't look or act halted */
	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
		ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
		ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
	retval = simple_io(tdev, urb, 1, 0, 0, __func__);
/* Check that endpoint @ep both reports halted via GET_STATUS and
 * repeatedly fails transfers with -EPIPE.
 */
static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
	/* should look and act halted */
	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
		ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
	/* status arrives little-endian on the wire */
	le16_to_cpus(&status);
		ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
	retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
	if (retval != -EPIPE)
	/* a halted endpoint must stay halted until it's cleared */
	retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
	if (retval != -EPIPE)
/* Full halt protocol round-trip on endpoint @ep: verify not halted,
 * SET_FEATURE(ENDPOINT_HALT), verify halted, usb_clear_halt(), verify
 * not halted again.
 */
static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
	/* shouldn't look or act halted now */
	retval = verify_not_halted(tdev, ep, urb);

	/* set halt (protocol test only), verify it worked */
	retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
			USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
			USB_ENDPOINT_HALT, ep,
			NULL, 0, USB_CTRL_SET_TIMEOUT);
		ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
	retval = verify_halted(tdev, ep, urb);

	/* clear halt (tests API + protocol), verify it worked */
	retval = usb_clear_halt(urb->dev, urb->pipe);
		ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
	retval = verify_not_halted(tdev, ep, urb);

	/* NOTE: could also verify SET_INTERFACE clear halts ... */
/* Run test_halt() on the device's bulk IN endpoint and, when present,
 * its bulk OUT endpoint, sharing one 512-byte urb.
 */
static int halt_simple(struct usbtest_dev *dev)
	urb = simple_alloc_urb(testdev_to_usbdev(dev), 0, 512);
		/* the status request addresses the IN endpoint directly */
		ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
		urb->pipe = dev->in_pipe;
		retval = test_halt(dev, ep, urb);
	if (dev->out_pipe) {
		ep = usb_pipeendpoint(dev->out_pipe);
		urb->pipe = dev->out_pipe;
		retval = test_halt(dev, ep, urb);
	simple_free_urb(urb);
1416 /*-------------------------------------------------------------------------*/
1418 /* Control OUT tests use the vendor control requests from Intel's
1419 * USB 2.0 compliance test device: write a buffer, read it back.
1421 * Intel's spec only _requires_ that it work for one packet, which
1422 * is pretty weak. Some HCDs place limits here; most devices will
1423 * need to be able to handle more than one OUT data packet. We'll
1424 * try whatever we're told to try.
/* Control-OUT test using Intel's USB 2.0 compliance-device vendor
 * requests: write a patterned buffer (req 0x5b), read it back
 * (req 0x5c), and verify every byte, @count times with the length
 * varied by @vary.  NOTE(review): length-update and error-path lines
 * are missing from this chunk.
 */
static int ctrl_out(struct usbtest_dev *dev,
		unsigned count, unsigned length, unsigned vary, unsigned offset)
	struct usb_device *udev;

	if (length < 1 || length > 0xffff || vary >= length)
	buf = kmalloc(length + offset, GFP_KERNEL);
	udev = testdev_to_usbdev(dev);

	/* NOTE: hardware might well act differently if we pushed it
	 * with lots back-to-back queued requests.
	for (i = 0; i < count; i++) {
		/* write patterned data */
		for (j = 0; j < len; j++)
		retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
				0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
		if (retval != len) {
			ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",

		/* read it back -- assuming nothing intervened!! */
		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
				0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
		if (retval != len) {
			ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",

		/* fail if we can't verify */
		for (j = 0; j < len; j++) {
			/* pattern is (iteration + index), truncated to u8 */
			if (buf[j] != (u8) (i + j)) {
				ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
					j, buf[j], (u8) i + j);

		/* [real world] the "zero bytes IN" case isn't really used.
		 * hardware can easily trip up in this weird case, since its
		 * status stage is IN, not OUT like other ep0in transfers.
			len = realworld ? 1 : 0;
		ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
	kfree(buf - offset);
1513 /*-------------------------------------------------------------------------*/
1515 /* ISO tests ... mimics common usage
1516 * - buffer length is split into N packets (mostly maxpacket sized)
1517 * - multi-buffers according to sglen
/* Shared state for one iso test run; updated from iso_callback() under
 * ctx->lock and waited on by test_iso_queue().
 */
1520 struct iso_context {
/* signaled once the last in-flight urb for this run completes */
1524 struct completion done;
/* per-packet error tally accumulated across all urbs */
1526 unsigned long errors;
/* total iso packets seen complete (denominator for the error rate) */
1527 unsigned long packet_count;
1528 struct usbtest_dev *dev;
/* Completion handler for the iso test urbs (runs in atomic context, so
 * resubmission uses GFP_ATOMIC).  Tallies per-packet errors, treats a
 * non-zero urb status or bad guard bytes as failing every packet in the
 * urb, resubmits the urb while more submissions are still wanted, and
 * completes ctx->done when the last pending urb finishes.
 */
1531 static void iso_callback(struct urb *urb)
1533 struct iso_context *ctx = urb->context;
1535 spin_lock(&ctx->lock);
1538 ctx->packet_count += urb->number_of_packets;
1539 if (urb->error_count > 0)
1540 ctx->errors += urb->error_count;
1541 else if (urb->status != 0)
1542 ctx->errors += urb->number_of_packets;
1543 else if (urb->actual_length != urb->transfer_buffer_length)
1545 else if (check_guard_bytes(ctx->dev, urb) != 0)
/* keep the ring going: resubmit while submissions remain wanted
 * and no earlier submit failed */
1548 if (urb->status == 0 && ctx->count > (ctx->pending - 1)
1549 && !ctx->submit_error) {
1550 int status = usb_submit_urb(urb, GFP_ATOMIC);
1555 dev_err(&ctx->dev->intf->dev,
1556 "iso resubmit err %d\n",
/* expected teardown errors: stop quietly, flag submit_error */
1559 case -ENODEV: /* disconnected */
1560 case -ESHUTDOWN: /* endpoint disabled */
1561 ctx->submit_error = 1;
/* last urb done: report totals and wake the waiter */
1567 if (ctx->pending == 0) {
1569 dev_err(&ctx->dev->intf->dev,
1570 "iso test, %lu errors out of %lu\n",
1571 ctx->errors, ctx->packet_count);
1572 complete(&ctx->done);
1575 spin_unlock(&ctx->lock);
/* Allocate and initialize one iso urb carrying 'bytes' of data split into
 * maxpacket-sized packets (only the last packet may be short).  The DMA
 * buffer is allocated coherently with 'offset' extra guard bytes in front
 * (filled with GUARD_BYTE) so unaligned-transfer tests can detect writes
 * before the buffer.  Returns the urb, or NULL on bad args/allocation
 * failure (exact failure paths elided between the visible lines).
 */
1578 static struct urb *iso_alloc_urb(
1579 struct usb_device *udev,
1581 struct usb_endpoint_descriptor *desc,
1587 unsigned i, maxp, packets;
1589 if (bytes < 0 || !desc)
/* wMaxPacketSize: bits 10:0 are the packet size, bits 12:11 encode
 * additional transactions per microframe (high-bandwidth) */
1591 maxp = 0x7ff & usb_endpoint_maxp(desc);
1592 maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11));
1593 packets = DIV_ROUND_UP(bytes, maxp);
1595 urb = usb_alloc_urb(packets, GFP_KERNEL);
1601 urb->number_of_packets = packets;
1602 urb->transfer_buffer_length = bytes;
1603 urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
1605 &urb->transfer_dma);
1606 if (!urb->transfer_buffer) {
/* guard region precedes the payload; skip past it */
1611 memset(urb->transfer_buffer, GUARD_BYTE, offset);
1612 urb->transfer_buffer += offset;
1613 urb->transfer_dma += offset;
1615 /* For inbound transfers use guard byte so that test fails if
1616 data not correctly copied */
1617 memset(urb->transfer_buffer,
1618 usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
1621 for (i = 0; i < packets; i++) {
1622 /* here, only the last packet will be short */
1623 urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
1624 bytes -= urb->iso_frame_desc[i].length;
1626 urb->iso_frame_desc[i].offset = maxp * i;
1629 urb->complete = iso_callback;
1630 /* urb->context = SET BY CALLER */
1631 urb->interval = 1 << (desc->bInterval - 1);
1632 urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
/* Run one iso test: allocate param->sglen urbs (hard cap of 10), submit
 * them all atomically under context.lock, let iso_callback() keep them
 * resubmitted until context.count submissions have been consumed, then
 * wait for completion and free everything.  Submission failures mid-loop
 * unwind the already-submitted urbs.  Failure policy (tail of function):
 * any submit error fails the test, as does a packet error rate > 10%.
 */
1637 test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
1638 int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
1640 struct iso_context context;
1641 struct usb_device *udev;
1643 unsigned long packets = 0;
1645 struct urb *urbs[10]; /* FIXME no limit */
1647 if (param->sglen > 10)
1650 memset(&context, 0, sizeof context);
/* total submissions wanted across all urbs, counted down in the callback */
1651 context.count = param->iterations * param->sglen;
1653 init_completion(&context.done);
1654 spin_lock_init(&context.lock);
1656 memset(urbs, 0, sizeof urbs);
1657 udev = testdev_to_usbdev(dev);
1658 dev_info(&dev->intf->dev,
1659 "... iso period %d %sframes, wMaxPacket %04x\n",
1660 1 << (desc->bInterval - 1),
1661 (udev->speed == USB_SPEED_HIGH) ? "micro" : "",
1662 usb_endpoint_maxp(desc));
1664 for (i = 0; i < param->sglen; i++) {
1665 urbs[i] = iso_alloc_urb(udev, pipe, desc,
1666 param->length, offset);
1671 packets += urbs[i]->number_of_packets;
1672 urbs[i]->context = &context;
1674 packets *= param->iterations;
1675 dev_info(&dev->intf->dev,
1676 "... total %lu msec (%lu packets)\n",
1677 (packets * (1 << (desc->bInterval - 1)))
/* high speed schedules in 125us microframes, 8 per millisecond */
1678 / ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
/* submit the whole batch under the lock so the callback sees a
 * consistent pending count */
1681 spin_lock_irq(&context.lock);
1682 for (i = 0; i < param->sglen; i++) {
1684 status = usb_submit_urb(urbs[i], GFP_ATOMIC);
1686 ERROR(dev, "submit iso[%d], error %d\n", i, status);
/* unwind urbs already queued before this failure */
1688 spin_unlock_irq(&context.lock);
1692 simple_free_urb(urbs[i]);
1695 context.submit_error = 1;
1699 spin_unlock_irq(&context.lock);
1701 wait_for_completion(&context.done);
1703 for (i = 0; i < param->sglen; i++) {
1705 simple_free_urb(urbs[i]);
1708 * Isochronous transfers are expected to fail sometimes. As an
1709 * arbitrary limit, we will report an error if any submissions
1710 * fail or if the transfer failure rate is > 10%.
1714 else if (context.submit_error)
1716 else if (context.errors > context.packet_count / 10)
1721 for (i = 0; i < param->sglen; i++) {
1723 simple_free_urb(urbs[i]);
/* Helper for tests 17-20: run simple_io() over an urb whose buffer is
 * deliberately misaligned (usbtest_alloc_urb(..., 1) requests the odd
 * offset), with the caller choosing pipe, length, iteration count,
 * transfer_flags (e.g. URB_NO_TRANSFER_DMA_MAP) and log label.
 */
1728 static int test_unaligned_bulk(
1729 struct usbtest_dev *tdev,
1733 unsigned transfer_flags,
1737 struct urb *urb = usbtest_alloc_urb(
1738 testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1);
1743 retval = simple_io(tdev, urb, iterations, 0, 0, label);
1744 simple_free_urb(urb);
1748 /*-------------------------------------------------------------------------*/
1750 /* We only have this one interface to user space, through usbfs.
1751 * User mode code can scan usbfs to find N different devices (maybe on
1752 * different busses) to use when testing, and allocate one thread per
1753 * test. So discovery is simplified, and we have no device naming issues.
1755 * Don't use these only as stress/load tests. Use them along with
1756 * other USB bus activity: plugging, unplugging, mousing, mp3 playback,
1757 * video capture, and so on. Run different tests at different times, in
1758 * different sequences. Nothing here should interact with other devices,
1759 * except indirectly by consuming USB bandwidth and CPU resources for test
1760 * threads and request completion. But the only way to know that for sure
1761 * is to test when HC queues are in use by many devices.
1763 * WARNING: Because usbfs grabs udev->dev.sem before calling this ioctl(),
1764 * it locks out usbcore in certain code paths. Notably, if you disconnect
1765 * the device-under-test, khubd will block forever waiting for the
1766 * ioctl to complete ... so that usb_disconnect() can abort the pending
1767 * urbs and then call usbtest_disconnect(). To abort a test, you're best
1768 * off just killing the userspace task and waiting for it to exit.
/* Sole user-space entry point (via usbfs): run one numbered test case
 * under dev->lock, forcing the default altsetting first when the device
 * needs one, and report elapsed time through param->duration.  Returns 0
 * on success, -EOPNOTSUPP for unknown/unsupported test numbers, or a
 * negative errno from the failing test.
 */
1772 usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
1774 struct usbtest_dev *dev = usb_get_intfdata(intf);
1775 struct usb_device *udev = testdev_to_usbdev(dev);
1776 struct usbtest_param *param = buf;
1777 int retval = -EOPNOTSUPP;
1779 struct scatterlist *sg;
1780 struct usb_sg_request req;
1781 struct timeval start;
1784 /* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */
1786 pattern = mod_pattern;
1788 if (code != USBTEST_REQUEST)
1791 if (param->iterations <= 0)
1794 if (mutex_lock_interruptible(&dev->lock))
1795 return -ERESTARTSYS;
1797 /* FIXME: What if a system sleep starts while a test is running? */
1799 /* some devices, like ez-usb default devices, need a non-default
1800 * altsetting to have any active endpoints. some tests change
1801 * altsettings; force a default so most tests don't need to check.
1803 if (dev->info->alt >= 0) {
1806 if (intf->altsetting->desc.bInterfaceNumber) {
1807 mutex_unlock(&dev->lock);
1810 res = set_altsetting(dev, dev->info->alt);
1813 "set altsetting to %d failed, %d\n",
1814 dev->info->alt, res);
1815 mutex_unlock(&dev->lock);
1821 * Just a bunch of test cases that every HCD is expected to handle.
1823 * Some may need specific firmware, though it'd be good to have
1824 * one firmware image to handle all the test cases.
1826 * FIXME add more tests! cancel requests, verify the data, control
1827 * queueing, concurrent read+write threads, and so on.
1829 do_gettimeofday(&start);
1830 switch (param->test_num) {
1833 dev_info(&intf->dev, "TEST 0: NOP\n");
1837 /* Simple non-queued bulk I/O tests */
1839 if (dev->out_pipe == 0)
1841 dev_info(&intf->dev,
1842 "TEST 1: write %d bytes %u times\n",
1843 param->length, param->iterations);
1844 urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
1849 /* FIRMWARE: bulk sink (maybe accepts short writes) */
1850 retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
1851 simple_free_urb(urb);
1854 if (dev->in_pipe == 0)
1856 dev_info(&intf->dev,
1857 "TEST 2: read %d bytes %u times\n",
1858 param->length, param->iterations);
1859 urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
1864 /* FIRMWARE: bulk source (maybe generates short writes) */
1865 retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
1866 simple_free_urb(urb);
1869 if (dev->out_pipe == 0 || param->vary == 0)
1871 dev_info(&intf->dev,
1872 "TEST 3: write/%d 0..%d bytes %u times\n",
1873 param->vary, param->length, param->iterations);
1874 urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
1879 /* FIRMWARE: bulk sink (maybe accepts short writes) */
1880 retval = simple_io(dev, urb, param->iterations, param->vary,
1882 simple_free_urb(urb);
1885 if (dev->in_pipe == 0 || param->vary == 0)
1887 dev_info(&intf->dev,
1888 "TEST 4: read/%d 0..%d bytes %u times\n",
1889 param->vary, param->length, param->iterations);
1890 urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
1895 /* FIRMWARE: bulk source (maybe generates short writes) */
1896 retval = simple_io(dev, urb, param->iterations, param->vary,
1898 simple_free_urb(urb);
1901 /* Queued bulk I/O tests */
1903 if (dev->out_pipe == 0 || param->sglen == 0)
1905 dev_info(&intf->dev,
1906 "TEST 5: write %d sglists %d entries of %d bytes\n",
1908 param->sglen, param->length);
1909 sg = alloc_sglist(param->sglen, param->length, 0);
1914 /* FIRMWARE: bulk sink (maybe accepts short writes) */
1915 retval = perform_sglist(dev, param->iterations, dev->out_pipe,
1916 &req, sg, param->sglen);
1917 free_sglist(sg, param->sglen);
1921 if (dev->in_pipe == 0 || param->sglen == 0)
1923 dev_info(&intf->dev,
1924 "TEST 6: read %d sglists %d entries of %d bytes\n",
1926 param->sglen, param->length);
1927 sg = alloc_sglist(param->sglen, param->length, 0);
1932 /* FIRMWARE: bulk source (maybe generates short writes) */
1933 retval = perform_sglist(dev, param->iterations, dev->in_pipe,
1934 &req, sg, param->sglen);
1935 free_sglist(sg, param->sglen);
1938 if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
1940 dev_info(&intf->dev,
1941 "TEST 7: write/%d %d sglists %d entries 0..%d bytes\n",
1942 param->vary, param->iterations,
1943 param->sglen, param->length);
1944 sg = alloc_sglist(param->sglen, param->length, param->vary);
1949 /* FIRMWARE: bulk sink (maybe accepts short writes) */
1950 retval = perform_sglist(dev, param->iterations, dev->out_pipe,
1951 &req, sg, param->sglen);
1952 free_sglist(sg, param->sglen);
1955 if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
1957 dev_info(&intf->dev,
1958 "TEST 8: read/%d %d sglists %d entries 0..%d bytes\n",
1959 param->vary, param->iterations,
1960 param->sglen, param->length);
1961 sg = alloc_sglist(param->sglen, param->length, param->vary);
1966 /* FIRMWARE: bulk source (maybe generates short writes) */
1967 retval = perform_sglist(dev, param->iterations, dev->in_pipe,
1968 &req, sg, param->sglen);
1969 free_sglist(sg, param->sglen);
1972 /* non-queued sanity tests for control (chapter 9 subset) */
1975 dev_info(&intf->dev,
1976 "TEST 9: ch9 (subset) control tests, %d times\n",
1978 for (i = param->iterations; retval == 0 && i--; /* NOP */)
1979 retval = ch9_postconfig(dev);
1981 dev_err(&intf->dev, "ch9 subset failed, "
1982 "iterations left %d\n", i);
1985 /* queued control messaging */
1988 dev_info(&intf->dev,
1989 "TEST 10: queue %d control calls, %d times\n",
1992 retval = test_ctrl_queue(dev, param);
1995 /* simple non-queued unlinks (ring with one urb) */
1997 if (dev->in_pipe == 0 || !param->length)
2000 dev_info(&intf->dev, "TEST 11: unlink %d reads of %d\n",
2001 param->iterations, param->length);
2002 for (i = param->iterations; retval == 0 && i--; /* NOP */)
2003 retval = unlink_simple(dev, dev->in_pipe,
2006 dev_err(&intf->dev, "unlink reads failed %d, "
2007 "iterations left %d\n", retval, i);
2010 if (dev->out_pipe == 0 || !param->length)
2013 dev_info(&intf->dev, "TEST 12: unlink %d writes of %d\n",
2014 param->iterations, param->length);
2015 for (i = param->iterations; retval == 0 && i--; /* NOP */)
2016 retval = unlink_simple(dev, dev->out_pipe,
2019 dev_err(&intf->dev, "unlink writes failed %d, "
2020 "iterations left %d\n", retval, i);
2025 if (dev->out_pipe == 0 && dev->in_pipe == 0)
2028 dev_info(&intf->dev, "TEST 13: set/clear %d halts\n",
2030 for (i = param->iterations; retval == 0 && i--; /* NOP */)
2031 retval = halt_simple(dev);
2034 ERROR(dev, "halts failed, iterations left %d\n", i);
2037 /* control write tests */
2039 if (!dev->info->ctrl_out)
2041 dev_info(&intf->dev, "TEST 14: %d ep0out, %d..%d vary %d\n",
2043 realworld ? 1 : 0, param->length,
2045 retval = ctrl_out(dev, param->iterations,
2046 param->length, param->vary, 0);
2049 /* iso write tests */
2051 if (dev->out_iso_pipe == 0 || param->sglen == 0)
2053 dev_info(&intf->dev,
2054 "TEST 15: write %d iso, %d entries of %d bytes\n",
2056 param->sglen, param->length);
2057 /* FIRMWARE: iso sink */
2058 retval = test_iso_queue(dev, param,
2059 dev->out_iso_pipe, dev->iso_out, 0);
2062 /* iso read tests */
2064 if (dev->in_iso_pipe == 0 || param->sglen == 0)
2066 dev_info(&intf->dev,
2067 "TEST 16: read %d iso, %d entries of %d bytes\n",
2069 param->sglen, param->length);
2070 /* FIRMWARE: iso source */
2071 retval = test_iso_queue(dev, param,
2072 dev->in_iso_pipe, dev->iso_in, 0);
2075 /* FIXME scatterlist cancel (needs helper thread) */
2077 /* Tests for bulk I/O using DMA mapping by core and odd address */
2079 if (dev->out_pipe == 0)
2081 dev_info(&intf->dev,
2082 "TEST 17: write odd addr %d bytes %u times core map\n",
2083 param->length, param->iterations);
2085 retval = test_unaligned_bulk(
2087 param->length, param->iterations,
2092 if (dev->in_pipe == 0)
2094 dev_info(&intf->dev,
2095 "TEST 18: read odd addr %d bytes %u times core map\n",
2096 param->length, param->iterations);
2098 retval = test_unaligned_bulk(
2100 param->length, param->iterations,
2104 /* Tests for bulk I/O using premapped coherent buffer and odd address */
2106 if (dev->out_pipe == 0)
2108 dev_info(&intf->dev,
2109 "TEST 19: write odd addr %d bytes %u times premapped\n",
2110 param->length, param->iterations);
2112 retval = test_unaligned_bulk(
2114 param->length, param->iterations,
2115 URB_NO_TRANSFER_DMA_MAP, "test19");
2119 if (dev->in_pipe == 0)
2121 dev_info(&intf->dev,
2122 "TEST 20: read odd addr %d bytes %u times premapped\n",
2123 param->length, param->iterations);
2125 retval = test_unaligned_bulk(
2127 param->length, param->iterations,
2128 URB_NO_TRANSFER_DMA_MAP, "test20");
2131 /* control write tests with unaligned buffer */
2133 if (!dev->info->ctrl_out)
2135 dev_info(&intf->dev,
2136 "TEST 21: %d ep0out odd addr, %d..%d vary %d\n",
2138 realworld ? 1 : 0, param->length,
2140 retval = ctrl_out(dev, param->iterations,
2141 param->length, param->vary, 1);
2144 /* unaligned iso tests */
2146 if (dev->out_iso_pipe == 0 || param->sglen == 0)
2148 dev_info(&intf->dev,
2149 "TEST 22: write %d iso odd, %d entries of %d bytes\n",
2151 param->sglen, param->length);
2152 retval = test_iso_queue(dev, param,
2153 dev->out_iso_pipe, dev->iso_out, 1);
2157 if (dev->in_iso_pipe == 0 || param->sglen == 0)
2159 dev_info(&intf->dev,
2160 "TEST 23: read %d iso odd, %d entries of %d bytes\n",
2162 param->sglen, param->length);
2163 retval = test_iso_queue(dev, param,
2164 dev->in_iso_pipe, dev->iso_in, 1);
2167 /* unlink URBs from a bulk-OUT queue */
2169 if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
/* this is test case 24; the message used to mislabel it "TEST 17" */
2172 dev_info(&intf->dev, "TEST 24: unlink from %d queues of "
2173 "%d %d-byte writes\n",
2174 param->iterations, param->sglen, param->length);
2175 for (i = param->iterations; retval == 0 && i > 0; --i) {
2176 retval = unlink_queued(dev, dev->out_pipe,
2177 param->sglen, param->length);
2180 "unlink queued writes failed %d, "
2181 "iterations left %d\n", retval, i);
/* report wall-clock duration back to user space */
2188 do_gettimeofday(&param->duration);
2189 param->duration.tv_sec -= start.tv_sec;
2190 param->duration.tv_usec -= start.tv_usec;
2191 if (param->duration.tv_usec < 0) {
2192 param->duration.tv_usec += 1000 * 1000;
2193 param->duration.tv_sec -= 1;
2195 mutex_unlock(&dev->lock);
2199 /*-------------------------------------------------------------------------*/
/* Module parameters: force_interrupt swaps the default bulk pipes for
 * interrupt pipes; vendor/product let the GENERIC match entry bind to a
 * specific device (vendor match required, product optional -- see
 * usbtest_probe()).
 */
2201 static unsigned force_interrupt;
2202 module_param(force_interrupt, uint, 0);
2203 MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");
2206 static unsigned short vendor;
2207 module_param(vendor, ushort, 0);
2208 MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");
2210 static unsigned short product;
2211 module_param(product, ushort, 0);
2212 MODULE_PARM_DESC(product, "product code (from vendor)");
/* Bind to a test device: honor the vendor/product module params for the
 * GENERIC (match_flags == 0) entry, allocate per-interface state with a
 * scratch buffer, then pick pipes either by forcing interrupt transfers,
 * by autoconf endpoint discovery, or from the usbtest_info table; finally
 * log a capability summary built from the *test string fragments.
 */
2216 usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
2218 struct usb_device *udev;
2219 struct usbtest_dev *dev;
2220 struct usbtest_info *info;
2221 char *rtest, *wtest;
2222 char *irtest, *iwtest;
2224 udev = interface_to_usbdev(intf);
2227 /* specify devices by module parameters? */
2228 if (id->match_flags == 0) {
2229 /* vendor match required, product match optional */
2230 if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
2232 if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
2234 dev_info(&intf->dev, "matched module params, "
2235 "vend=0x%04x prod=0x%04x\n",
2236 le16_to_cpu(udev->descriptor.idVendor),
2237 le16_to_cpu(udev->descriptor.idProduct));
2241 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2244 info = (struct usbtest_info *) id->driver_info;
2246 mutex_init(&dev->lock);
2250 /* cacheline-aligned scratch for i/o */
2251 dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
2252 if (dev->buf == NULL) {
2257 /* NOTE this doesn't yet test the handful of differences that are
2258 * visible with high speed interrupts: bigger maxpacket (1K) and
2259 * "high bandwidth" modes (up to 3 packets/uframe).
/* low speed devices have no bulk endpoints, so fall back to interrupt */
2262 irtest = iwtest = "";
2263 if (force_interrupt || udev->speed == USB_SPEED_LOW) {
2265 dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
2269 dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
2270 wtest = " intr-out";
2273 if (info->autoconf) {
2276 status = get_endpoints(dev, intf);
2278 WARNING(dev, "couldn't get endpoints, %d\n",
2282 /* may find bulk or ISO pipes */
2285 dev->in_pipe = usb_rcvbulkpipe(udev,
2288 dev->out_pipe = usb_sndbulkpipe(udev,
2294 wtest = " bulk-out";
2295 if (dev->in_iso_pipe)
2297 if (dev->out_iso_pipe)
2298 iwtest = " iso-out";
2301 usb_set_intfdata(intf, dev);
2302 dev_info(&intf->dev, "%s\n", info->name);
2303 dev_info(&intf->dev, "%s {control%s%s%s%s%s} tests%s\n",
2304 usb_speed_string(udev->speed),
2305 info->ctrl_out ? " in/out" : "",
2308 info->alt >= 0 ? " (+alt)" : "");
/* PM hooks; no per-device state is manipulated here.  NOTE(review): a
 * system sleep during a running test is unhandled -- see the FIXME in
 * usbtest_ioctl().
 */
2312 static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
2317 static int usbtest_resume(struct usb_interface *intf)
/* Unbind: detach the per-interface test state from the interface so no
 * further ioctls can reach it, and log the disconnect.
 */
2323 static void usbtest_disconnect(struct usb_interface *intf)
2325 struct usbtest_dev *dev = usb_get_intfdata(intf);
2327 usb_set_intfdata(intf, NULL);
2328 dev_dbg(&intf->dev, "disconnect\n");
2332 /* Basic testing only needs a device that can source or sink bulk traffic.
2333 * Any device can test control transfers (default with GENERIC binding).
2335 * Several entries work with the default EP0 implementation that's built
2336 * into EZ-USB chips. There's a default vendor ID which can be overridden
2337 * by (very) small config EEPROMS, but otherwise all these devices act
2338 * identically until firmware is loaded: only EP0 works. It turns out
2339 * to be easy to make other endpoints work, without modifying that EP0
2340 * behavior. For now, we expect that kind of firmware.
/* Per-device capability descriptions referenced from id_table[] below via
 * .driver_info; usbtest_probe() uses them to pick endpoints and enabled
 * test groups.
 */
2343 /* an21xx or fx versions of ez-usb */
2344 static struct usbtest_info ez1_info = {
2345 .name = "EZ-USB device",
2351 /* fx2 version of ez-usb */
2352 static struct usbtest_info ez2_info = {
2353 .name = "FX2 device",
2359 /* ezusb family device with dedicated usb test firmware,
2361 static struct usbtest_info fw_info = {
2362 .name = "usb test device",
2366 .autoconf = 1, /* iso and ctrl_out need autoconf */
2368 .iso = 1, /* iso_ep's are #8 in/out */
2371 /* peripheral running Linux and 'zero.c' test firmware, or
2372 * its user-mode cousin. different versions of this use
2373 * different hardware with the same vendor/product codes.
2374 * host side MUST rely on the endpoint descriptors.
2376 static struct usbtest_info gz_info = {
2377 .name = "Linux gadget zero",
2383 static struct usbtest_info um_info = {
2384 .name = "Linux user mode test driver",
2389 static struct usbtest_info um2_info = {
2390 .name = "Linux user mode ISO test driver",
2397 /* this is a nice source of high speed bulk data;
2398 * uses an FX2, with firmware provided in the device
2400 static struct usbtest_info ibot2_info = {
2401 .name = "iBOT2 webcam",
2408 /* we can use any device to test control traffic */
2409 static struct usbtest_info generic_info = {
2410 .name = "Generic USB device",
/* Device match table: EZ-USB family parts and their re-enumerated test
 * firmware, Gadget Zero and its user-mode cousins, optional entries that
 * conflict with real drivers (guarded elsewhere by KEYSPAN_19Qi / IBOT2),
 * and a catch-all GENERIC entry driven by the vendor/product params.
 */
2416 static const struct usb_device_id id_table[] = {
2418 /*-------------------------------------------------------------*/
2420 /* EZ-USB devices which download firmware to replace (or in our
2421 * case augment) the default device implementation.
2424 /* generic EZ-USB FX controller */
2425 { USB_DEVICE(0x0547, 0x2235),
2426 .driver_info = (unsigned long) &ez1_info,
2429 /* CY3671 development board with EZ-USB FX */
2430 { USB_DEVICE(0x0547, 0x0080),
2431 .driver_info = (unsigned long) &ez1_info,
2434 /* generic EZ-USB FX2 controller (or development board) */
2435 { USB_DEVICE(0x04b4, 0x8613),
2436 .driver_info = (unsigned long) &ez2_info,
2439 /* re-enumerated usb test device firmware */
2440 { USB_DEVICE(0xfff0, 0xfff0),
2441 .driver_info = (unsigned long) &fw_info,
2444 /* "Gadget Zero" firmware runs under Linux */
2445 { USB_DEVICE(0x0525, 0xa4a0),
2446 .driver_info = (unsigned long) &gz_info,
2449 /* so does a user-mode variant */
2450 { USB_DEVICE(0x0525, 0xa4a4),
2451 .driver_info = (unsigned long) &um_info,
2454 /* ... and a user-mode variant that talks iso */
2455 { USB_DEVICE(0x0525, 0xa4a3),
2456 .driver_info = (unsigned long) &um2_info,
2460 /* Keyspan 19qi uses an21xx (original EZ-USB) */
2461 /* this does not coexist with the real Keyspan 19qi driver! */
2462 { USB_DEVICE(0x06cd, 0x010b),
2463 .driver_info = (unsigned long) &ez1_info,
2467 /*-------------------------------------------------------------*/
2470 /* iBOT2 makes a nice source of high speed bulk-in data */
2471 /* this does not coexist with a real iBOT2 driver! */
2472 { USB_DEVICE(0x0b62, 0x0059),
2473 .driver_info = (unsigned long) &ibot2_info,
2477 /*-------------------------------------------------------------*/
2480 /* module params can specify devices to use for control tests */
2481 { .driver_info = (unsigned long) &generic_info, },
2484 /*-------------------------------------------------------------*/
2488 MODULE_DEVICE_TABLE(usb, id_table);
/* usbcore driver glue: tests are dispatched through .unlocked_ioctl
 * (usbtest_ioctl, reached via usbfs); probe/disconnect manage the
 * per-interface usbtest_dev state.
 */
2490 static struct usb_driver usbtest_driver = {
2492 .id_table = id_table,
2493 .probe = usbtest_probe,
2494 .unlocked_ioctl = usbtest_ioctl,
2495 .disconnect = usbtest_disconnect,
2496 .suspend = usbtest_suspend,
2497 .resume = usbtest_resume,
2500 /*-------------------------------------------------------------------------*/
/* Module init: log the module-param device selection, then register the
 * driver with usbcore; exit simply deregisters it.
 */
2502 static int __init usbtest_init(void)
2506 pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
2508 return usb_register(&usbtest_driver);
2510 module_init(usbtest_init);
2512 static void __exit usbtest_exit(void)
2514 usb_deregister(&usbtest_driver);
2516 module_exit(usbtest_exit);
2518 MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
2519 MODULE_LICENSE("GPL");