/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/dmapool.h>

#include "xhci.h"
/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
        struct xhci_segment *seg;
        dma_addr_t dma;

        seg = kzalloc(sizeof *seg, flags);
        if (!seg)
                return NULL;
        xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);

        seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
        if (!seg->trbs) {
                kfree(seg);
                return NULL;
        }
        xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
                        seg->trbs, (unsigned long long)dma);

        memset(seg->trbs, 0, SEGMENT_SIZE);
        seg->dma = dma;
        seg->next = NULL;
        return seg;
}
static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
        if (!seg)
                return;
        xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
                        seg->trbs, (unsigned long long)seg->dma);
        dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
        xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
        kfree(seg);
}
/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
                struct xhci_segment *next, bool link_trbs)
{
        u32 val;

        if (!prev || !next)
                return;
        prev->next = next;
        if (link_trbs) {
                prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;

                /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
                val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
                val &= ~TRB_TYPE_BITMASK;
                val |= TRB_TYPE(TRB_LINK);
                /* Always set the chain bit with 0.95 hardware */
                if (xhci_link_trb_quirk(xhci))
                        val |= TRB_CHAIN;
                prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
        }
        xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
                        (unsigned long long)prev->dma,
                        (unsigned long long)next->dma);
}
/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
        struct xhci_segment *seg;
        struct xhci_segment *first_seg;

        if (!ring || !ring->first_seg)
                return;
        first_seg = ring->first_seg;
        seg = first_seg->next;
        xhci_dbg(xhci, "Freeing ring at %p\n", ring);
        while (seg != first_seg) {
                struct xhci_segment *next = seg->next;
                xhci_segment_free(xhci, seg);
                seg = next;
        }
        xhci_segment_free(xhci, first_seg);
        ring->first_seg = NULL;
        kfree(ring);
}
/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
                unsigned int num_segs, bool link_trbs, gfp_t flags)
{
        struct xhci_ring *ring;
        struct xhci_segment *prev;

        ring = kzalloc(sizeof *(ring), flags);
        xhci_dbg(xhci, "Allocating ring at %p\n", ring);
        if (!ring)
                return NULL;

        INIT_LIST_HEAD(&ring->td_list);
        if (num_segs == 0)
                return ring;

        ring->first_seg = xhci_segment_alloc(xhci, flags);
        if (!ring->first_seg)
                goto fail;
        num_segs--;

        prev = ring->first_seg;
        while (num_segs > 0) {
                struct xhci_segment *next;

                next = xhci_segment_alloc(xhci, flags);
                if (!next)
                        goto fail;
                xhci_link_segments(xhci, prev, next, link_trbs);

                prev = next;
                num_segs--;
        }
        xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);

        if (link_trbs) {
                /* See section 4.9.2.1 and 6.4.4.1 */
                prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
                xhci_dbg(xhci, "Wrote link toggle flag to"
                                " segment %p (virtual), 0x%llx (DMA)\n",
                                prev, (unsigned long long)prev->dma);
        }
        /* The ring is empty, so the enqueue pointer == dequeue pointer */
        ring->enqueue = ring->first_seg->trbs;
        ring->enq_seg = ring->first_seg;
        ring->dequeue = ring->enqueue;
        ring->deq_seg = ring->first_seg;
        /* The ring is initialized to 0.  The producer must write 1 to the
         * cycle bit to hand over ownership of the TRB, so PCS = 1.  The
         * consumer must compare CCS to the cycle bit to check ownership,
         * so CCS = 1.
         */
        ring->cycle_state = 1;
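        /*
         * Illustration: while cycle_state is 1, the producer marks a TRB as
         * owned by the consumer by writing its cycle bit to 1; once the
         * enqueue pointer wraps past the toggle-cycle link TRB, cycle_state
         * flips to 0 and ownership is then marked by writing 0.  The
         * consumer toggles its own copy in lockstep.
         */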
        return ring;

fail:
        xhci_ring_free(xhci, ring);
        return NULL;
}
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)

struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
                int type, gfp_t flags)
{
        struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
        if (!ctx)
                return NULL;

        BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
        ctx->type = type;
        ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
        if (type == XHCI_CTX_TYPE_INPUT)
                ctx->size += CTX_SIZE(xhci->hcc_params);

        ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
        memset(ctx->bytes, 0, ctx->size);
        return ctx;
}
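/*
 * Size arithmetic (illustrative): with 32-byte contexts a device context is
 * 1024 bytes, i.e. 32 entries (one slot context plus 31 endpoint contexts);
 * with 64-byte contexts it doubles to 2048.  An input context carries one
 * extra entry, the input control context, hence the extra CTX_SIZE above.
 */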
void xhci_free_container_ctx(struct xhci_hcd *xhci,
                struct xhci_container_ctx *ctx)
{
        dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
        kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
                struct xhci_container_ctx *ctx)
{
        BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
        return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
                struct xhci_container_ctx *ctx)
{
        if (ctx->type == XHCI_CTX_TYPE_DEVICE)
                return (struct xhci_slot_ctx *)ctx->bytes;

        return (struct xhci_slot_ctx *)
                (ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
                struct xhci_container_ctx *ctx,
                unsigned int ep_index)
{
        /* increment ep index by offset of start of ep ctx array */
        ep_index++;
        if (ctx->type == XHCI_CTX_TYPE_INPUT)
                ep_index++;

        return (struct xhci_ep_ctx *)
                (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
                struct xhci_virt_ep *ep)
{
        init_timer(&ep->stop_cmd_timer);
        ep->stop_cmd_timer.data = (unsigned long) ep;
        ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
        ep->xhci = xhci;
}
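/* The stop_cmd_timer armed above fires if the host controller never
 * completes a Stop Endpoint command, so the driver can recover instead of
 * waiting forever; the watchdog handler lives in xhci-ring.c.
 */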
/* All the xhci_tds in the ring's TD list should be freed at this point */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
        struct xhci_virt_device *dev;
        int i;

        /* Slot ID 0 is reserved */
        if (slot_id == 0 || !xhci->devs[slot_id])
                return;

        dev = xhci->devs[slot_id];
        xhci->dcbaa->dev_context_ptrs[slot_id] = 0;

        for (i = 0; i < 31; ++i)
                if (dev->eps[i].ring)
                        xhci_ring_free(xhci, dev->eps[i].ring);

        if (dev->in_ctx)
                xhci_free_container_ctx(xhci, dev->in_ctx);
        if (dev->out_ctx)
                xhci_free_container_ctx(xhci, dev->out_ctx);

        kfree(xhci->devs[slot_id]);
        xhci->devs[slot_id] = NULL;
}
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
                struct usb_device *udev, gfp_t flags)
{
        struct xhci_virt_device *dev;
        int i;

        /* Slot ID 0 is reserved */
        if (slot_id == 0 || xhci->devs[slot_id]) {
                xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
                return 0;
        }

        xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
        if (!xhci->devs[slot_id])
                return 0;
        dev = xhci->devs[slot_id];

        /* Allocate the (output) device context that will be used in the HC. */
        dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
        if (!dev->out_ctx)
                goto fail;
        xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
                        (unsigned long long)dev->out_ctx->dma);

        /* Allocate the (input) device context for address device command */
        dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
        if (!dev->in_ctx)
                goto fail;
        xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
                        (unsigned long long)dev->in_ctx->dma);

        /* Initialize the cancellation list and watchdog timers for each ep */
        for (i = 0; i < 31; i++) {
                xhci_init_endpoint_timer(xhci, &dev->eps[i]);
                INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
        }

        /* Allocate endpoint 0 ring */
        dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
        if (!dev->eps[0].ring)
                goto fail;

        init_completion(&dev->cmd_completion);
        INIT_LIST_HEAD(&dev->cmd_list);

        /* Point to output device context in dcbaa. */
        xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
        xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
                        slot_id,
                        &xhci->dcbaa->dev_context_ptrs[slot_id],
                        (unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]);

        return 1;
fail:
        xhci_free_virt_device(xhci, slot_id);
        return 0;
}
/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
        struct xhci_virt_device *dev;
        struct xhci_ep_ctx *ep0_ctx;
        struct usb_device *top_dev;
        struct xhci_slot_ctx *slot_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;

        dev = xhci->devs[udev->slot_id];
        /* Slot ID 0 is reserved */
        if (udev->slot_id == 0 || !dev) {
                xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
                                udev->slot_id);
                return -EINVAL;
        }
        ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
        ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
        slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

        /* 2) New slot context and endpoint 0 context are valid */
        ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;

        /* 3) Only the control endpoint is valid - one endpoint context */
        slot_ctx->dev_info |= LAST_CTX(1);

        slot_ctx->dev_info |= (u32) udev->route;
        switch (udev->speed) {
        case USB_SPEED_SUPER:
                slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
                break;
        case USB_SPEED_HIGH:
                slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
                break;
        case USB_SPEED_FULL:
                slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
                break;
        case USB_SPEED_LOW:
                slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
                break;
        case USB_SPEED_VARIABLE:
                xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
                return -EINVAL;
        default:
                /* Speed was set earlier, this shouldn't happen. */
                BUG();
        }
        /* Find the root hub port this device is under */
        for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
                        top_dev = top_dev->parent)
                /* Found device below root hub */;
        slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
        xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);

        /* Is this a LS/FS device under a HS hub? */
        if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
                        udev->tt) {
                slot_ctx->tt_info = udev->tt->hub->slot_id;
                slot_ctx->tt_info |= udev->ttport << 8;
                if (udev->tt->multi)
                        slot_ctx->dev_info |= DEV_MTT;
        }
        xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
        xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

        /* Step 4 - ring already allocated */
        /* Step 5 */
        ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
        /*
         * XXX: Not sure about wireless USB devices.
         */
        switch (udev->speed) {
        case USB_SPEED_SUPER:
                ep0_ctx->ep_info2 |= MAX_PACKET(512);
                break;
        case USB_SPEED_HIGH:
        /* USB core guesses at a 64-byte max packet first for FS devices */
        case USB_SPEED_FULL:
                ep0_ctx->ep_info2 |= MAX_PACKET(64);
                break;
        case USB_SPEED_LOW:
                ep0_ctx->ep_info2 |= MAX_PACKET(8);
                break;
        case USB_SPEED_VARIABLE:
                xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
                return -EINVAL;
        default:
                BUG();
        }
        /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
        ep0_ctx->ep_info2 |= MAX_BURST(0);
        ep0_ctx->ep_info2 |= ERROR_COUNT(3);

        ep0_ctx->deq = dev->eps[0].ring->first_seg->dma;
        ep0_ctx->deq |= dev->eps[0].ring->cycle_state;
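        /* Bit 0 of the TR dequeue pointer field carries the dequeue cycle
         * state (DCS); segment DMA addresses are at least 16-byte aligned,
         * so the low bit is free to hold it, as the OR above relies on.
         */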
        /* Steps 7 and 8 were done in xhci_alloc_virt_device() */

        return 0;
}
/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval
 * field is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if
 * interval is 0.
 */
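/*
 * Worked examples (illustrative): a high-speed interrupt endpoint with
 * bInterval = 4 gets Interval = 3, i.e. a service period of
 * 2^3 * 125us = 1ms.  A full-speed interrupt endpoint with bInterval = 9
 * frames becomes fls(8*9) - 1 = 6, i.e. 2^6 = 64 microframes (8ms),
 * rounded down from the requested 9ms.
 */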
static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        unsigned int interval = 0;

        switch (udev->speed) {
        case USB_SPEED_HIGH:
                /* Max NAK rate */
                if (usb_endpoint_xfer_control(&ep->desc) ||
                                usb_endpoint_xfer_bulk(&ep->desc))
                        interval = ep->desc.bInterval;
                /* Fall through - SS and HS isoc/int have same decoding */
        case USB_SPEED_SUPER:
                if (usb_endpoint_xfer_int(&ep->desc) ||
                                usb_endpoint_xfer_isoc(&ep->desc)) {
                        if (ep->desc.bInterval == 0)
                                interval = 0;
                        else
                                interval = ep->desc.bInterval - 1;
                        if (interval > 15)
                                interval = 15;
                        if (interval != ep->desc.bInterval - 1)
                                dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
                                                ep->desc.bEndpointAddress, 1 << interval);
                }
                break;
        case USB_SPEED_FULL:
        case USB_SPEED_LOW:
                if (usb_endpoint_xfer_int(&ep->desc) ||
                                usb_endpoint_xfer_isoc(&ep->desc)) {
                        /* Convert bInterval (in 1-255 frames) to microframes
                         * and round down to the nearest power of 2.
                         */
                        interval = fls(8*ep->desc.bInterval) - 1;
                        if (interval > 10)
                                interval = 10;
                        if (interval < 3)
                                interval = 3;
                        if ((1 << interval) != 8*ep->desc.bInterval)
                                dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
                                                ep->desc.bEndpointAddress, 1 << interval);
                }
                break;
        default:
                BUG();
        }
        return EP_INTERVAL(interval);
}
static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        int in;
        u32 type;

        in = usb_endpoint_dir_in(&ep->desc);
        if (usb_endpoint_xfer_control(&ep->desc)) {
                type = EP_TYPE(CTRL_EP);
        } else if (usb_endpoint_xfer_bulk(&ep->desc)) {
                if (in)
                        type = EP_TYPE(BULK_IN_EP);
                else
                        type = EP_TYPE(BULK_OUT_EP);
        } else if (usb_endpoint_xfer_isoc(&ep->desc)) {
                if (in)
                        type = EP_TYPE(ISOC_IN_EP);
                else
                        type = EP_TYPE(ISOC_OUT_EP);
        } else if (usb_endpoint_xfer_int(&ep->desc)) {
                if (in)
                        type = EP_TYPE(INT_IN_EP);
                else
                        type = EP_TYPE(INT_OUT_EP);
        } else {
                BUG();
        }
        return type;
}
int xhci_endpoint_init(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                struct usb_device *udev,
                struct usb_host_endpoint *ep,
                gfp_t mem_flags)
{
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_ring *ep_ring;
        unsigned int max_packet;
        unsigned int max_burst;

        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

        /* Set up the endpoint ring */
        virt_dev->eps[ep_index].new_ring =
                xhci_ring_alloc(xhci, 1, true, mem_flags);
        if (!virt_dev->eps[ep_index].new_ring)
                return -ENOMEM;
        ep_ring = virt_dev->eps[ep_index].new_ring;
        ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;

        ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);

        /* FIXME dig Mult and streams info out of ep companion desc */

        /* Allow 3 retries for everything but isoc;
         * error count = 0 means infinite retries.
         */
        if (!usb_endpoint_xfer_isoc(&ep->desc))
                ep_ctx->ep_info2 = ERROR_COUNT(3);
        else
                ep_ctx->ep_info2 = ERROR_COUNT(1);

        ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);

        /* Set the max packet size and max burst */
        switch (udev->speed) {
        case USB_SPEED_SUPER:
                max_packet = ep->desc.wMaxPacketSize;
                ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
                /* dig out max burst from ep companion desc */
                if (!ep->ss_ep_comp) {
                        xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
                        max_packet = 0;
                } else {
                        max_packet = ep->ss_ep_comp->desc.bMaxBurst;
                }
                ep_ctx->ep_info2 |= MAX_BURST(max_packet);
                break;
        case USB_SPEED_HIGH:
                /* bits 11:12 specify the number of additional transaction
                 * opportunities per microframe (USB 2.0, section 9.6.6)
                 */
                if (usb_endpoint_xfer_isoc(&ep->desc) ||
                                usb_endpoint_xfer_int(&ep->desc)) {
                        max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
                        ep_ctx->ep_info2 |= MAX_BURST(max_burst);
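                        /* e.g. wMaxPacketSize = 0x1400 has bits 12:11 = 2,
                         * i.e. (0x1400 & 0x1800) >> 11 == 2, meaning two
                         * additional (three total) transactions per
                         * microframe.
                         */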
                }
                /* Fall through */
        case USB_SPEED_FULL:
        case USB_SPEED_LOW:
                max_packet = ep->desc.wMaxPacketSize & 0x3ff;
                ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
                break;
        default:
                BUG();
        }
        /* FIXME Debug endpoint context */
        return 0;
}
void xhci_endpoint_zero(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                struct usb_host_endpoint *ep)
{
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;

        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

        ep_ctx->ep_info = 0;
        ep_ctx->ep_info2 = 0;
        ep_ctx->deq = 0;
        ep_ctx->tx_info = 0;
        /* Don't free the endpoint ring until the set interface or
         * configuration request succeeds.
         */
}
/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and
 * then issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx,
                struct xhci_container_ctx *out_ctx,
                unsigned int ep_index)
{
        struct xhci_ep_ctx *out_ep_ctx;
        struct xhci_ep_ctx *in_ep_ctx;

        out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

        in_ep_ctx->ep_info = out_ep_ctx->ep_info;
        in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
        in_ep_ctx->deq = out_ep_ctx->deq;
        in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}
/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the slot and then
 * issue a configure endpoint command.  Only the context entries field
 * matters, but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx,
                struct xhci_container_ctx *out_ctx)
{
        struct xhci_slot_ctx *in_slot_ctx;
        struct xhci_slot_ctx *out_slot_ctx;

        in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
        out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

        in_slot_ctx->dev_info = out_slot_ctx->dev_info;
        in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
        in_slot_ctx->tt_info = out_slot_ctx->tt_info;
        in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
        int i;
        struct device *dev = xhci_to_hcd(xhci)->self.controller;
        int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

        xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);

        if (!num_sp)
                return 0;

        xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
        if (!xhci->scratchpad)
                goto fail_sp;

        xhci->scratchpad->sp_array =
                pci_alloc_consistent(to_pci_dev(dev),
                                num_sp * sizeof(u64),
                                &xhci->scratchpad->sp_dma);
        if (!xhci->scratchpad->sp_array)
                goto fail_sp2;

        xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
        if (!xhci->scratchpad->sp_buffers)
                goto fail_sp3;

        xhci->scratchpad->sp_dma_buffers =
                kzalloc(sizeof(dma_addr_t) * num_sp, flags);
        if (!xhci->scratchpad->sp_dma_buffers)
                goto fail_sp4;

        xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
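        /* Entry 0 of the DCBAA never holds a device context (slot 0 is
         * reserved); when the HC requests scratchpad buffers, it holds the
         * DMA address of the scratchpad buffer array instead.
         */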
        for (i = 0; i < num_sp; i++) {
                dma_addr_t dma;
                void *buf = pci_alloc_consistent(to_pci_dev(dev),
                                xhci->page_size, &dma);
                if (!buf)
                        goto fail_sp5;

                xhci->scratchpad->sp_array[i] = dma;
                xhci->scratchpad->sp_buffers[i] = buf;
                xhci->scratchpad->sp_dma_buffers[i] = dma;
        }

        return 0;

fail_sp5:
        for (i = i - 1; i >= 0; i--) {
                pci_free_consistent(to_pci_dev(dev), xhci->page_size,
                                xhci->scratchpad->sp_buffers[i],
                                xhci->scratchpad->sp_dma_buffers[i]);
        }
        kfree(xhci->scratchpad->sp_dma_buffers);

fail_sp4:
        kfree(xhci->scratchpad->sp_buffers);

fail_sp3:
        pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
                        xhci->scratchpad->sp_array,
                        xhci->scratchpad->sp_dma);

fail_sp2:
        kfree(xhci->scratchpad);
        xhci->scratchpad = NULL;

fail_sp:
        return -ENOMEM;
}
static void scratchpad_free(struct xhci_hcd *xhci)
{
        int num_sp;
        int i;
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        if (!xhci->scratchpad)
                return;

        num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

        for (i = 0; i < num_sp; i++) {
                pci_free_consistent(pdev, xhci->page_size,
                                xhci->scratchpad->sp_buffers[i],
                                xhci->scratchpad->sp_dma_buffers[i]);
        }
        kfree(xhci->scratchpad->sp_dma_buffers);
        kfree(xhci->scratchpad->sp_buffers);
        pci_free_consistent(pdev, num_sp * sizeof(u64),
                        xhci->scratchpad->sp_array,
                        xhci->scratchpad->sp_dma);
        kfree(xhci->scratchpad);
        xhci->scratchpad = NULL;
}
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
                bool allocate_completion, gfp_t mem_flags)
{
        struct xhci_command *command;

        command = kzalloc(sizeof(*command), mem_flags);
        if (!command)
                return NULL;

        command->in_ctx =
                xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags);
        if (!command->in_ctx) {
                kfree(command);
                return NULL;
        }

        if (allocate_completion) {
                command->completion =
                        kzalloc(sizeof(struct completion), mem_flags);
                if (!command->completion) {
                        xhci_free_container_ctx(xhci, command->in_ctx);
                        kfree(command);
                        return NULL;
                }
                init_completion(command->completion);
        }

        INIT_LIST_HEAD(&command->cmd_list);
        return command;
}
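/*
 * Typical use (a sketch, not code from this file): allocate with a
 * completion, queue a command TRB, ring the doorbell, then wait:
 *
 *	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
 *	if (!command)
 *		return -ENOMEM;
 *	... queue the command TRB and ring the host controller doorbell ...
 *	wait_for_completion(command->completion);
 *	xhci_free_command(xhci, command);
 */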
void xhci_free_command(struct xhci_hcd *xhci,
                struct xhci_command *command)
{
        xhci_free_container_ctx(xhci,
                        command->in_ctx);
        kfree(command->completion);
        kfree(command);
}
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        int size;
        int i;

        /* Free the Event Ring Segment Table and the actual Event Ring */
        xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
        xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
        xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
        size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
        if (xhci->erst.entries)
                pci_free_consistent(pdev, size,
                                xhci->erst.entries, xhci->erst.erst_dma_addr);
        xhci->erst.entries = NULL;
        xhci_dbg(xhci, "Freed ERST\n");
        if (xhci->event_ring)
                xhci_ring_free(xhci, xhci->event_ring);
        xhci->event_ring = NULL;
        xhci_dbg(xhci, "Freed event ring\n");

        xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
        if (xhci->cmd_ring)
                xhci_ring_free(xhci, xhci->cmd_ring);
        xhci->cmd_ring = NULL;
        xhci_dbg(xhci, "Freed command ring\n");

        for (i = 1; i < MAX_HC_SLOTS; ++i)
                xhci_free_virt_device(xhci, i);

        if (xhci->segment_pool)
                dma_pool_destroy(xhci->segment_pool);
        xhci->segment_pool = NULL;
        xhci_dbg(xhci, "Freed segment pool\n");

        if (xhci->device_pool)
                dma_pool_destroy(xhci->device_pool);
        xhci->device_pool = NULL;
        xhci_dbg(xhci, "Freed device context pool\n");

        xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
        if (xhci->dcbaa)
                pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
                                xhci->dcbaa, xhci->dcbaa->dma);
        xhci->dcbaa = NULL;

        scratchpad_free(xhci);
        xhci->page_size = 0;
        xhci->page_shift = 0;
}
static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
                struct xhci_segment *input_seg,
                union xhci_trb *start_trb,
                union xhci_trb *end_trb,
                dma_addr_t input_dma,
                struct xhci_segment *result_seg,
                char *test_name, int test_number)
{
        unsigned long long start_dma;
        unsigned long long end_dma;
        struct xhci_segment *seg;

        start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
        end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);

        seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
        if (seg != result_seg) {
                xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
                                test_name, test_number);
                xhci_warn(xhci, "Tested TRB math w/ seg %p and "
                                "input DMA 0x%llx\n",
                                input_seg,
                                (unsigned long long) input_dma);
                xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
                                "ending TRB %p (0x%llx DMA)\n",
                                start_trb, start_dma,
                                end_trb, end_dma);
                xhci_warn(xhci, "Expected seg %p, got seg %p\n",
                                result_seg, seg);
                return -1;
        }
        return 0;
}
/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
{
        struct {
                dma_addr_t input_dma;
                struct xhci_segment *result_seg;
        } simple_test_vector [] = {
                /* A zeroed DMA field should fail */
                { 0, NULL },
                /* One TRB before the ring start should fail */
                { xhci->event_ring->first_seg->dma - 16, NULL },
                /* One byte before the ring start should fail */
                { xhci->event_ring->first_seg->dma - 1, NULL },
                /* Starting TRB should succeed */
                { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
                /* Ending TRB should succeed */
                { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
                        xhci->event_ring->first_seg },
                /* One byte after the ring end should fail */
                { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
                /* One TRB after the ring end should fail */
                { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
                /* An address of all ones should fail */
                { (dma_addr_t) (~0), NULL },
        };
        struct {
                struct xhci_segment *input_seg;
                union xhci_trb *start_trb;
                union xhci_trb *end_trb;
                dma_addr_t input_dma;
                struct xhci_segment *result_seg;
        } complex_test_vector [] = {
                /* Test feeding a valid DMA address from a different ring */
                { .input_seg = xhci->event_ring->first_seg,
                        .start_trb = xhci->event_ring->first_seg->trbs,
                        .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
                        .input_dma = xhci->cmd_ring->first_seg->dma,
                        .result_seg = NULL,
                },
                /* Test feeding a valid end TRB from a different ring */
                { .input_seg = xhci->event_ring->first_seg,
                        .start_trb = xhci->event_ring->first_seg->trbs,
                        .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
                        .input_dma = xhci->cmd_ring->first_seg->dma,
                        .result_seg = NULL,
                },
                /* Test feeding a valid start and end TRB from a different ring */
                { .input_seg = xhci->event_ring->first_seg,
                        .start_trb = xhci->cmd_ring->first_seg->trbs,
                        .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
                        .input_dma = xhci->cmd_ring->first_seg->dma,
                        .result_seg = NULL,
                },
                /* TRB in this ring, but after this TD */
                { .input_seg = xhci->event_ring->first_seg,
                        .start_trb = &xhci->event_ring->first_seg->trbs[0],
                        .end_trb = &xhci->event_ring->first_seg->trbs[3],
                        .input_dma = xhci->event_ring->first_seg->dma + 4*16,
                        .result_seg = NULL,
                },
                /* TRB in this ring, but before this TD */
                { .input_seg = xhci->event_ring->first_seg,
                        .start_trb = &xhci->event_ring->first_seg->trbs[3],
                        .end_trb = &xhci->event_ring->first_seg->trbs[6],
                        .input_dma = xhci->event_ring->first_seg->dma + 2*16,
                        .result_seg = NULL,
                },
                /* TRB in this ring, but after this wrapped TD */
                { .input_seg = xhci->event_ring->first_seg,
                        .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
                        .end_trb = &xhci->event_ring->first_seg->trbs[1],
                        .input_dma = xhci->event_ring->first_seg->dma + 2*16,
                        .result_seg = NULL,
                },
                /* TRB in this ring, but before this wrapped TD */
                { .input_seg = xhci->event_ring->first_seg,
                        .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
                        .end_trb = &xhci->event_ring->first_seg->trbs[1],
                        .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
                        .result_seg = NULL,
                },
                /* TRB not in this ring, and we have a wrapped TD */
                { .input_seg = xhci->event_ring->first_seg,
                        .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
                        .end_trb = &xhci->event_ring->first_seg->trbs[1],
                        .input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
                        .result_seg = NULL,
                },
        };

        unsigned int num_tests;
        int i, ret;

        num_tests = sizeof(simple_test_vector) / sizeof(simple_test_vector[0]);
        for (i = 0; i < num_tests; i++) {
                ret = xhci_test_trb_in_td(xhci,
                                xhci->event_ring->first_seg,
                                xhci->event_ring->first_seg->trbs,
                                &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
                                simple_test_vector[i].input_dma,
                                simple_test_vector[i].result_seg,
                                "Simple", i);
                if (ret < 0)
                        return ret;
        }

        num_tests = sizeof(complex_test_vector) / sizeof(complex_test_vector[0]);
        for (i = 0; i < num_tests; i++) {
                ret = xhci_test_trb_in_td(xhci,
                                complex_test_vector[i].input_seg,
                                complex_test_vector[i].start_trb,
                                complex_test_vector[i].end_trb,
                                complex_test_vector[i].input_dma,
                                complex_test_vector[i].result_seg,
                                "Complex", i);
                if (ret < 0)
                        return ret;
        }
        xhci_dbg(xhci, "TRB math tests passed.\n");
        return 0;
}
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
        dma_addr_t dma;
        struct device *dev = xhci_to_hcd(xhci)->self.controller;
        unsigned int val, val2;
        u64 val_64;
        struct xhci_segment *seg;
        u32 page_size;
        int i;

        page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
        xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
        for (i = 0; i < 16; i++) {
                if ((0x1 & page_size) != 0)
                        break;
                page_size = page_size >> 1;
        }
        if (i < 16)
                xhci_dbg(xhci, "Supported page size of %iK\n",
                                (1 << (i+12)) / 1024);
        else
                xhci_warn(xhci, "WARN: no supported page size\n");
        /* Use 4K pages, since that's common and the minimum the HC supports */
        xhci->page_shift = 12;
        xhci->page_size = 1 << xhci->page_shift;
        xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
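        /* Example: a PAGESIZE register reading 0x1 has bit 0 set, so the
         * controller supports 4K pages (1 << (0 + 12)); a reading of 0x8
         * (bit 3 set) would mean 32K pages.
         */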
        /*
         * Program the Number of Device Slots Enabled field in the CONFIG
         * register with the max value of slots the HC can handle.
         */
        val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
        xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
                        (unsigned int) val);
        val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
        val |= (val2 & ~HCS_SLOTS_MASK);
        xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
                        (unsigned int) val);
        xhci_writel(xhci, val, &xhci->op_regs->config_reg);

        /*
         * Section 5.4.8 - doorbell array must be
         * "physically contiguous and 64-byte (cache line) aligned".
         */
        xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
                        sizeof(*xhci->dcbaa), &dma);
        if (!xhci->dcbaa)
                goto fail;
        memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
        xhci->dcbaa->dma = dma;
        xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
                        (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
        xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);

        /*
         * Initialize the ring segment pool.  The ring must be a contiguous
         * structure comprised of TRBs.  The TRBs must be 16 byte aligned;
         * however, the command ring segment needs 64-byte aligned segments,
         * so we pick the greater alignment need.
         */
        xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
                        SEGMENT_SIZE, 64, xhci->page_size);
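        /* Passing xhci->page_size as the pool's boundary keeps any single
         * segment from straddling a page boundary, so each ring segment
         * stays physically contiguous as the spec requires.
         */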
        /* See Table 46 and Note on Figure 55 */
        xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
                        2112, 64, xhci->page_size);
        if (!xhci->segment_pool || !xhci->device_pool)
                goto fail;

        /* Set up the command ring to have one segment for now. */
        xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
        if (!xhci->cmd_ring)
                goto fail;
        xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
        xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
                        (unsigned long long)xhci->cmd_ring->first_seg->dma);

        /* Set the address in the Command Ring Control register */
        val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
                (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
                xhci->cmd_ring->cycle_state;
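        /* The low bits of CRCR hold the ring cycle state (RCS) and command
         * ring control flags, which is why the segment DMA address is masked
         * against CMD_RING_RSVD_BITS before the cycle state is ORed in.
         */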
        xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
                        (unsigned long long) val_64);
        xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
        xhci_dbg_cmd_ptrs(xhci);

        val = xhci_readl(xhci, &xhci->cap_regs->db_off);
        val &= DBOFF_MASK;
        xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
                        " from cap regs base addr\n", val);
        xhci->dba = (void *) xhci->cap_regs + val;
        xhci_dbg_regs(xhci);
        xhci_print_run_regs(xhci);
        /* Set ir_set to interrupt register set 0 */
        xhci->ir_set = (void *) xhci->run_regs->ir_set;

        /*
         * Event ring setup: Allocate a normal ring, but also setup
         * the event ring segment table (ERST).  Section 4.9.3.
         */
        xhci_dbg(xhci, "// Allocating event ring\n");
        xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
        if (!xhci->event_ring)
                goto fail;
        if (xhci_check_trb_in_td_math(xhci, flags) < 0)
                goto fail;

        xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
                        sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
        if (!xhci->erst.entries)
                goto fail;
        xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
                        (unsigned long long)dma);

        memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
        xhci->erst.num_entries = ERST_NUM_SEGS;
        xhci->erst.erst_dma_addr = dma;
        xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
                        xhci->erst.num_entries,
                        xhci->erst.entries,
                        (unsigned long long)xhci->erst.erst_dma_addr);

        /* set ring base address and size for each segment table entry */
        for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
                struct xhci_erst_entry *entry = &xhci->erst.entries[val];
                entry->seg_addr = seg->dma;
                entry->seg_size = TRBS_PER_SEGMENT;
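                /* seg_size is counted in TRBs, not bytes */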
                entry->rsvd = 0;
                seg = seg->next;
        }

        /* set ERST count with the number of entries in the segment table */
        val = xhci_readl(xhci, &xhci->ir_set->erst_size);
        val &= ERST_SIZE_MASK;
        val |= ERST_NUM_SEGS;
        xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
                        val);
        xhci_writel(xhci, val, &xhci->ir_set->erst_size);

        xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
        /* set the segment table base address */
        xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
                        (unsigned long long)xhci->erst.erst_dma_addr);
        val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
        val_64 &= ERST_PTR_MASK;
        val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
        xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

        /* Set the event ring dequeue address */
        xhci_set_hc_event_deq(xhci);
        xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
        xhci_print_ir_set(xhci, xhci->ir_set, 0);

        /*
         * XXX: Might need to set the Interrupter Moderation Register to
         * something other than the default (~1 ms minimum between interrupts).
         * See section 5.5.1.2.
         */
        init_completion(&xhci->addr_dev);
        for (i = 0; i < MAX_HC_SLOTS; ++i)
                xhci->devs[i] = NULL;

        if (scratchpad_alloc(xhci, flags))
                goto fail;

        return 0;

fail:
        xhci_warn(xhci, "Couldn't initialize memory\n");
        xhci_mem_cleanup(xhci);
        return -ENOMEM;
}