author    Linus Torvalds <torvalds@linux-foundation.org>  2009-09-23 09:25:16 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-09-23 09:25:16 -0700
commit    be90a49ca22a95f184d9f32d35b5247b44032849 (patch)
tree      d3c2edc18c003c384366f57901616ac29c80bc27 /drivers/usb/host/xhci-ring.c
parent    1f0918d03ff4b5c94540c71ce889672abdbc2f4a (diff)
parent    a87371b477774b290c27bc5cb7f4ccc5379574a9 (diff)
download  linux-be90a49ca22a95f184d9f32d35b5247b44032849.tar.gz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6: (142 commits)
  USB: Fix sysfs paths in documentation
  USB: skeleton: fix coding style issues.
  USB: O_NONBLOCK in read path of skeleton
  USB: make usb-skeleton honor O_NONBLOCK in write path
  USB: skel_read really sucks royally
  USB: Add hub descriptor update hook for xHCI
  USB: xhci: Support USB hubs.
  USB: xhci: Set multi-TT field for LS/FS devices under hubs.
  USB: xhci: Set route string for all devices.
  USB: xhci: Fix command wait list handling.
  USB: xhci: Change how xHCI commands are handled.
  USB: xhci: Refactor input device context setup.
  USB: xhci: Endpoint representation refactoring.
  USB: gadget: ether needs to select CRC32
  USB: fix USBTMC get_capabilities success handling
  USB: fix missing error check in probing
  USB: usbfs: add USBDEVFS_URB_BULK_CONTINUATION flag
  USB: support for autosuspend in sierra while online
  USB: ehci-dbgp,ehci: Allow dbpg to work with suspend/resume
  USB: ehci-dbgp,documentation: Documentation updates for ehci-dbgp
  ...
Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--  drivers/usb/host/xhci-ring.c  377
1 file changed, 311 insertions(+), 66 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index aa88a067148b..173c39c76489 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -172,8 +172,9 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
  * have their chain bit cleared (so that each Link TRB is a separate TD).
  *
  * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
- * set, but other sections talk about dealing with the chain bit set.
- * Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB.
+ * set, but other sections talk about dealing with the chain bit set.  This was
+ * fixed in the 0.96 specification errata, but we have to assume that all 0.95
+ * xHCI hardware can't handle the chain bit being cleared on a link TRB.
  */
 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
 {
@@ -191,8 +192,14 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
 	while (last_trb(xhci, ring, ring->enq_seg, next)) {
 		if (!consumer) {
 			if (ring != xhci->event_ring) {
-				next->link.control &= ~TRB_CHAIN;
-				next->link.control |= chain;
+				/* If we're not dealing with 0.95 hardware,
+				 * carry over the chain bit of the previous TRB
+				 * (which may mean the chain bit is cleared).
+				 */
+				if (!xhci_link_trb_quirk(xhci)) {
+					next->link.control &= ~TRB_CHAIN;
+					next->link.control |= chain;
+				}
 				/* Give this link TRB to the hardware */
 				wmb();
 				if (next->link.control & TRB_CYCLE)
@@ -289,16 +296,18 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
 		unsigned int slot_id,
 		unsigned int ep_index)
 {
-	struct xhci_ring *ep_ring;
+	struct xhci_virt_ep *ep;
+	unsigned int ep_state;
 	u32 field;
 	__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
 
-	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+	ep = &xhci->devs[slot_id]->eps[ep_index];
+	ep_state = ep->ep_state;
 	/* Don't ring the doorbell for this endpoint if there are pending
 	 * cancellations because we don't want to interrupt processing.
 	 */
-	if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)
-			&& !(ep_ring->state & EP_HALTED)) {
+	if (!ep->cancels_pending && !(ep_state & SET_DEQ_PENDING)
+			&& !(ep_state & EP_HALTED)) {
 		field = xhci_readl(xhci, db_addr) & DB_MASK;
 		xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
 		/* Flush PCI posted writes - FIXME Matthew Wilcox says this
@@ -354,7 +363,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		struct xhci_td *cur_td, struct xhci_dequeue_state *state)
 {
 	struct xhci_virt_device *dev = xhci->devs[slot_id];
-	struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
+	struct xhci_ring *ep_ring = dev->eps[ep_index].ring;
 	struct xhci_generic_trb *trb;
 	struct xhci_ep_ctx *ep_ctx;
 	dma_addr_t addr;
@@ -362,7 +371,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	state->new_cycle_state = 0;
 	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
 	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
-			ep_ring->stopped_trb,
+			dev->eps[ep_index].stopped_trb,
 			&state->new_cycle_state);
 	if (!state->new_deq_seg)
 		BUG();
@@ -442,9 +451,11 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 		union xhci_trb *deq_ptr, u32 cycle_state);
 
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
-		struct xhci_ring *ep_ring, unsigned int slot_id,
-		unsigned int ep_index, struct xhci_dequeue_state *deq_state)
+		unsigned int slot_id, unsigned int ep_index,
+		struct xhci_dequeue_state *deq_state)
 {
+	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+
 	xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
 			"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
 			deq_state->new_deq_seg,
@@ -461,8 +472,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 	 * if the ring is running, and ringing the doorbell starts the
 	 * ring running.
 	 */
-	ep_ring->state |= SET_DEQ_PENDING;
-	xhci_ring_cmd_db(xhci);
+	ep->ep_state |= SET_DEQ_PENDING;
 }
 
 /*
@@ -481,6 +491,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	unsigned int slot_id;
 	unsigned int ep_index;
 	struct xhci_ring *ep_ring;
+	struct xhci_virt_ep *ep;
 	struct list_head *entry;
 	struct xhci_td *cur_td = 0;
 	struct xhci_td *last_unlinked_td;
@@ -493,9 +504,10 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	memset(&deq_state, 0, sizeof(deq_state));
 	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
 	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
-	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+	ep = &xhci->devs[slot_id]->eps[ep_index];
+	ep_ring = ep->ring;
 
-	if (list_empty(&ep_ring->cancelled_td_list))
+	if (list_empty(&ep->cancelled_td_list))
 		return;
 
 	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
@@ -503,7 +515,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	 * it.  We're also in the event handler, so we can't get re-interrupted
 	 * if another Stop Endpoint command completes
 	 */
-	list_for_each(entry, &ep_ring->cancelled_td_list) {
+	list_for_each(entry, &ep->cancelled_td_list) {
 		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
 		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
 				cur_td->first_trb,
@@ -512,7 +524,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 		 * If we stopped on the TD we need to cancel, then we have to
 		 * move the xHC endpoint ring dequeue pointer past this TD.
 		 */
-		if (cur_td == ep_ring->stopped_td)
+		if (cur_td == ep->stopped_td)
 			xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
 					&deq_state);
 		else
@@ -523,14 +535,15 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 		 * the cancelled TD list for URB completion later.
 		 */
 		list_del(&cur_td->td_list);
-		ep_ring->cancels_pending--;
+		ep->cancels_pending--;
 	}
 	last_unlinked_td = cur_td;
 
 	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
 	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-		xhci_queue_new_dequeue_state(xhci, ep_ring,
+		xhci_queue_new_dequeue_state(xhci,
 				slot_id, ep_index, &deq_state);
+		xhci_ring_cmd_db(xhci);
 	} else {
 		/* Otherwise just ring the doorbell to restart the ring */
 		ring_ep_doorbell(xhci, slot_id, ep_index);
@@ -543,7 +556,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	 * So stop when we've completed the URB for the last TD we unlinked.
 	 */
 	do {
-		cur_td = list_entry(ep_ring->cancelled_td_list.next,
+		cur_td = list_entry(ep->cancelled_td_list.next,
 				struct xhci_td, cancelled_td_list);
 		list_del(&cur_td->cancelled_td_list);
 
@@ -590,7 +603,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
 	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
 	dev = xhci->devs[slot_id];
-	ep_ring = dev->ep_rings[ep_index];
+	ep_ring = dev->eps[ep_index].ring;
 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
 	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
 
@@ -634,7 +647,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 				ep_ctx->deq);
 	}
 
-	ep_ring->state &= ~SET_DEQ_PENDING;
+	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
 	ring_ep_doorbell(xhci, slot_id, ep_index);
 }
 
@@ -644,18 +657,60 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
 {
 	int slot_id;
 	unsigned int ep_index;
+	struct xhci_ring *ep_ring;
 
 	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
 	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 	/* This command will only fail if the endpoint wasn't halted,
 	 * but we don't care.
 	 */
 	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
 			(unsigned int) GET_COMP_CODE(event->status));
 
-	/* Clear our internal halted state and restart the ring */
-	xhci->devs[slot_id]->ep_rings[ep_index]->state &= ~EP_HALTED;
-	ring_ep_doorbell(xhci, slot_id, ep_index);
+	/* HW with the reset endpoint quirk needs to have a configure endpoint
+	 * command complete before the endpoint can be used.  Queue that here
+	 * because the HW can't handle two commands being queued in a row.
+	 */
+	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
+		xhci_dbg(xhci, "Queueing configure endpoint command\n");
+		xhci_queue_configure_endpoint(xhci,
+				xhci->devs[slot_id]->in_ctx->dma, slot_id,
+				false);
+		xhci_ring_cmd_db(xhci);
+	} else {
+		/* Clear our internal halted state and restart the ring */
+		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
+		ring_ep_doorbell(xhci, slot_id, ep_index);
+	}
+}
+
+/* Check to see if a command in the device's command queue matches this one.
+ * Signal the completion or free the command, and return 1.  Return 0 if the
+ * completed command isn't at the head of the command list.
+ */
+static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		struct xhci_event_cmd *event)
+{
+	struct xhci_command *command;
+
+	if (list_empty(&virt_dev->cmd_list))
+		return 0;
+
+	command = list_entry(virt_dev->cmd_list.next,
+			struct xhci_command, cmd_list);
+	if (xhci->cmd_ring->dequeue != command->command_trb)
+		return 0;
+
+	command->status =
+		GET_COMP_CODE(event->status);
+	list_del(&command->cmd_list);
+	if (command->completion)
+		complete(command->completion);
+	else
+		xhci_free_command(xhci, command);
+	return 1;
 }
 
 static void handle_cmd_completion(struct xhci_hcd *xhci,
@@ -664,6 +719,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 	int slot_id = TRB_TO_SLOT_ID(event->flags);
 	u64 cmd_dma;
 	dma_addr_t cmd_dequeue_dma;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	struct xhci_virt_device *virt_dev;
+	unsigned int ep_index;
+	struct xhci_ring *ep_ring;
+	unsigned int ep_state;
 
 	cmd_dma = event->cmd_trb;
 	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
@@ -691,6 +751,47 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 			xhci_free_virt_device(xhci, slot_id);
 		break;
 	case TRB_TYPE(TRB_CONFIG_EP):
+		virt_dev = xhci->devs[slot_id];
+		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
+			break;
+		/*
+		 * Configure endpoint commands can come from the USB core
+		 * configuration or alt setting changes, or because the HW
+		 * needed an extra configure endpoint command after a reset
+		 * endpoint command.  In the latter case, the xHCI driver is
+		 * not waiting on the configure endpoint command.
+		 */
+		ctrl_ctx = xhci_get_input_control_ctx(xhci,
+				virt_dev->in_ctx);
+		/* Input ctx add_flags are the endpoint index plus one */
+		ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
+		ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+		if (!ep_ring) {
+			/* This must have been an initial configure endpoint */
+			xhci->devs[slot_id]->cmd_status =
+				GET_COMP_CODE(event->status);
+			complete(&xhci->devs[slot_id]->cmd_completion);
+			break;
+		}
+		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
+		xhci_dbg(xhci, "Completed config ep cmd - last ep index = %d, "
+				"state = %d\n", ep_index, ep_state);
+		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
+				ep_state & EP_HALTED) {
+			/* Clear our internal halted state and restart ring */
+			xhci->devs[slot_id]->eps[ep_index].ep_state &=
+				~EP_HALTED;
+			ring_ep_doorbell(xhci, slot_id, ep_index);
+		} else {
+			xhci->devs[slot_id]->cmd_status =
+				GET_COMP_CODE(event->status);
+			complete(&xhci->devs[slot_id]->cmd_completion);
+		}
+		break;
+	case TRB_TYPE(TRB_EVAL_CONTEXT):
+		virt_dev = xhci->devs[slot_id];
+		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
+			break;
 		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
 		complete(&xhci->devs[slot_id]->cmd_completion);
 		break;
@@ -805,7 +906,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		struct xhci_transfer_event *event)
 {
 	struct xhci_virt_device *xdev;
+	struct xhci_virt_ep *ep;
 	struct xhci_ring *ep_ring;
+	unsigned int slot_id;
 	int ep_index;
 	struct xhci_td *td = 0;
 	dma_addr_t event_dma;
@@ -814,9 +917,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	struct urb *urb = 0;
 	int status = -EINPROGRESS;
 	struct xhci_ep_ctx *ep_ctx;
+	u32 trb_comp_code;
 
 	xhci_dbg(xhci, "In %s\n", __func__);
-	xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
+	slot_id = TRB_TO_SLOT_ID(event->flags);
+	xdev = xhci->devs[slot_id];
 	if (!xdev) {
 		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
 		return -ENODEV;
@@ -825,7 +930,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	/* Endpoint ID is 1 based, our index is zero based */
 	ep_index = TRB_TO_EP_ID(event->flags) - 1;
 	xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
-	ep_ring = xdev->ep_rings[ep_index];
+	ep = &xdev->eps[ep_index];
+	ep_ring = ep->ring;
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 	if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
 		xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
@@ -870,7 +976,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			(unsigned int) event->flags);
 
 	/* Look for common error cases */
-	switch (GET_COMP_CODE(event->transfer_len)) {
+	trb_comp_code = GET_COMP_CODE(event->transfer_len);
+	switch (trb_comp_code) {
 	/* Skip codes that require special handling depending on
 	 * transfer type
 	 */
@@ -885,7 +992,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		break;
 	case COMP_STALL:
 		xhci_warn(xhci, "WARN: Stalled endpoint\n");
-		ep_ring->state |= EP_HALTED;
+		ep->ep_state |= EP_HALTED;
 		status = -EPIPE;
 		break;
 	case COMP_TRB_ERR:
@@ -913,7 +1020,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	/* Was this a control transfer? */
 	if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
 		xhci_debug_trb(xhci, xhci->event_ring->dequeue);
-		switch (GET_COMP_CODE(event->transfer_len)) {
+		switch (trb_comp_code) {
 		case COMP_SUCCESS:
 			if (event_trb == ep_ring->dequeue) {
 				xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
@@ -928,8 +1035,37 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			break;
 		case COMP_SHORT_TX:
 			xhci_warn(xhci, "WARN: short transfer on control ep\n");
-			status = -EREMOTEIO;
+			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+				status = -EREMOTEIO;
+			else
+				status = 0;
 			break;
+		case COMP_BABBLE:
+			/* The 0.95 spec says a babbling control endpoint
+			 * is not halted. The 0.96 spec says it is.  Some HW
+			 * claims to be 0.95 compliant, but it halts the control
+			 * endpoint anyway.  Check if a babble halted the
+			 * endpoint.
+			 */
+			if (ep_ctx->ep_info != EP_STATE_HALTED)
+				break;
+			/* else fall through */
+		case COMP_STALL:
+			/* Did we transfer part of the data (middle) phase? */
+			if (event_trb != ep_ring->dequeue &&
+					event_trb != td->last_trb)
+				td->urb->actual_length =
+					td->urb->transfer_buffer_length
+					- TRB_LEN(event->transfer_len);
+			else
+				td->urb->actual_length = 0;
+
+			ep->stopped_td = td;
+			ep->stopped_trb = event_trb;
+			xhci_queue_reset_ep(xhci, slot_id, ep_index);
+			xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
+			xhci_ring_cmd_db(xhci);
+			goto td_cleanup;
 		default:
 			/* Others already handled above */
 			break;
@@ -943,7 +1079,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			if (event_trb == td->last_trb) {
 				if (td->urb->actual_length != 0) {
 					/* Don't overwrite a previously set error code */
-					if (status == -EINPROGRESS || status == 0)
+					if ((status == -EINPROGRESS ||
+								status == 0) &&
+							(td->urb->transfer_flags
+							 & URB_SHORT_NOT_OK))
 						/* Did we already see a short data stage? */
 						status = -EREMOTEIO;
 				} else {
@@ -952,7 +1091,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 				}
 			} else {
 			/* Maybe the event was for the data stage? */
-				if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) {
+				if (trb_comp_code != COMP_STOP_INVAL) {
 					/* We didn't stop on a link TRB in the middle */
 					td->urb->actual_length =
 						td->urb->transfer_buffer_length -
@@ -964,7 +1103,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			}
 		}
 	} else {
-		switch (GET_COMP_CODE(event->transfer_len)) {
+		switch (trb_comp_code) {
 		case COMP_SUCCESS:
 			/* Double check that the HW transferred everything. */
 			if (event_trb != td->last_trb) {
@@ -975,7 +1114,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 				else
 					status = 0;
 			} else {
-				xhci_dbg(xhci, "Successful bulk transfer!\n");
+				if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
+					xhci_dbg(xhci, "Successful bulk "
+							"transfer!\n");
+				else
+					xhci_dbg(xhci, "Successful interrupt "
+							"transfer!\n");
 				status = 0;
 			}
 			break;
@@ -1001,11 +1145,17 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 				td->urb->actual_length =
 					td->urb->transfer_buffer_length -
 					TRB_LEN(event->transfer_len);
-				if (td->urb->actual_length < 0) {
+				if (td->urb->transfer_buffer_length <
+						td->urb->actual_length) {
 					xhci_warn(xhci, "HC gave bad length "
 							"of %d bytes left\n",
 							TRB_LEN(event->transfer_len));
 					td->urb->actual_length = 0;
+					if (td->urb->transfer_flags &
+							URB_SHORT_NOT_OK)
+						status = -EREMOTEIO;
+					else
+						status = 0;
 				}
 				/* Don't overwrite a previously set error code */
 				if (status == -EINPROGRESS) {
@@ -1041,30 +1191,31 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			/* If the ring didn't stop on a Link or No-op TRB, add
 			 * in the actual bytes transferred from the Normal TRB
 			 */
-			if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
+			if (trb_comp_code != COMP_STOP_INVAL)
 				td->urb->actual_length +=
 					TRB_LEN(cur_trb->generic.field[2]) -
 					TRB_LEN(event->transfer_len);
 		}
 	}
-	if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
-			GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
+	if (trb_comp_code == COMP_STOP_INVAL ||
+			trb_comp_code == COMP_STOP) {
 		/* The Endpoint Stop Command completion will take care of any
 		 * stopped TDs.  A stopped TD may be restarted, so don't update
 		 * the ring dequeue pointer or take this TD off any lists yet.
 		 */
-		ep_ring->stopped_td = td;
-		ep_ring->stopped_trb = event_trb;
+		ep->stopped_td = td;
+		ep->stopped_trb = event_trb;
 	} else {
-		if (GET_COMP_CODE(event->transfer_len) == COMP_STALL) {
+		if (trb_comp_code == COMP_STALL ||
+				trb_comp_code == COMP_BABBLE) {
 			/* The transfer is completed from the driver's
 			 * perspective, but we need to issue a set dequeue
 			 * command for this stalled endpoint to move the dequeue
 			 * pointer past the TD.  We can't do that here because
 			 * the halt condition must be cleared first.
 			 */
-			ep_ring->stopped_td = td;
-			ep_ring->stopped_trb = event_trb;
+			ep->stopped_td = td;
+			ep->stopped_trb = event_trb;
 		} else {
 			/* Update ring dequeue pointer */
 			while (ep_ring->dequeue != td->last_trb)
@@ -1072,16 +1223,41 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			inc_deq(xhci, ep_ring, false);
 		}
 
+td_cleanup:
 		/* Clean up the endpoint's TD list */
 		urb = td->urb;
+		/* Do one last check of the actual transfer length.
+		 * If the host controller said we transferred more data than
+		 * the buffer length, urb->actual_length will be a very big
+		 * number (since it's unsigned).  Play it safe and say we didn't
+		 * transfer anything.
+		 */
+		if (urb->actual_length > urb->transfer_buffer_length) {
+			xhci_warn(xhci, "URB transfer length is wrong, "
+					"xHC issue? req. len = %u, "
+					"act. len = %u\n",
+					urb->transfer_buffer_length,
+					urb->actual_length);
+			urb->actual_length = 0;
+			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+				status = -EREMOTEIO;
+			else
+				status = 0;
+		}
 		list_del(&td->td_list);
 		/* Was this TD slated to be cancelled but completed anyway? */
 		if (!list_empty(&td->cancelled_td_list)) {
 			list_del(&td->cancelled_td_list);
-			ep_ring->cancels_pending--;
+			ep->cancels_pending--;
 		}
-		/* Leave the TD around for the reset endpoint function to use */
-		if (GET_COMP_CODE(event->transfer_len) != COMP_STALL) {
+		/* Leave the TD around for the reset endpoint function to use
+		 * (but only if it's not a control endpoint, since we already
+		 * queued the Set TR dequeue pointer command for stalled
+		 * control endpoints).
+		 */
+		if (usb_endpoint_xfer_control(&urb->ep->desc) ||
+			(trb_comp_code != COMP_STALL &&
+				trb_comp_code != COMP_BABBLE)) {
 			kfree(td);
 		}
 		urb->hcpriv = NULL;
@@ -1094,7 +1270,7 @@ cleanup:
 	if (urb) {
 		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
 		xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n",
-				urb, td->urb->actual_length, status);
+				urb, urb->actual_length, status);
 		spin_unlock(&xhci->lock);
 		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
 		spin_lock(&xhci->lock);
@@ -1235,7 +1411,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 {
 	int ret;
 	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
-	ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
+	ret = prepare_ring(xhci, xdev->eps[ep_index].ring,
 			ep_ctx->ep_info & EP_STATE_MASK,
 			num_trbs, mem_flags);
 	if (ret)
@@ -1255,9 +1431,9 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 	(*td)->urb = urb;
 	urb->hcpriv = (void *) (*td);
 	/* Add this TD to the tail of the endpoint ring's TD list */
-	list_add_tail(&(*td)->td_list, &xdev->ep_rings[ep_index]->td_list);
-	(*td)->start_seg = xdev->ep_rings[ep_index]->enq_seg;
-	(*td)->first_trb = xdev->ep_rings[ep_index]->enqueue;
+	list_add_tail(&(*td)->td_list, &xdev->eps[ep_index].ring->td_list);
+	(*td)->start_seg = xdev->eps[ep_index].ring->enq_seg;
+	(*td)->first_trb = xdev->eps[ep_index].ring->enqueue;
 
 	return 0;
 }
@@ -1335,6 +1511,47 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
 	ring_ep_doorbell(xhci, slot_id, ep_index);
 }
 
+/*
+ * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
+ * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
+ * (comprised of sg list entries) can take several service intervals to
+ * transmit.
+ */
+int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
+			xhci->devs[slot_id]->out_ctx, ep_index);
+	int xhci_interval;
+	int ep_interval;
+
+	xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
+	ep_interval = urb->interval;
+	/* Convert to microframes */
+	if (urb->dev->speed == USB_SPEED_LOW ||
+			urb->dev->speed == USB_SPEED_FULL)
+		ep_interval *= 8;
+	/* FIXME change this to a warning and a suggestion to use the new API
+	 * to set the polling interval (once the API is added).
+	 */
+	if (xhci_interval != ep_interval) {
+		if (!printk_ratelimit())
+			dev_dbg(&urb->dev->dev, "Driver uses different interval"
+					" (%d microframe%s) than xHCI "
+					"(%d microframe%s)\n",
+					ep_interval,
+					ep_interval == 1 ? "" : "s",
+					xhci_interval,
+					xhci_interval == 1 ? "" : "s");
+		urb->interval = xhci_interval;
+		/* Convert back to frames for LS/FS devices */
+		if (urb->dev->speed == USB_SPEED_LOW ||
+				urb->dev->speed == USB_SPEED_FULL)
+			urb->interval /= 8;
+	}
+	return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
+}
+
 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
 {
@@ -1350,7 +1567,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	struct xhci_generic_trb *start_trb;
 	int start_cycle;
 
-	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 	num_trbs = count_sg_trbs_needed(xhci, urb);
 	num_sgs = urb->num_sgs;
 
@@ -1483,7 +1700,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	if (urb->sg)
 		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
 
-	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 
 	num_trbs = 0;
 	/* How much data is (potentially) left before the 64KB boundary? */
@@ -1594,7 +1811,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	u32 field, length_field;
 	struct xhci_td *td;
 
-	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 
 	/*
 	 * Need to copy setup packet into setup TRB, so we can't use the setup
@@ -1677,12 +1894,27 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 /****		Command Ring Operations		****/
 
-/* Generic function for queueing a command TRB on the command ring */
-static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4)
+/* Generic function for queueing a command TRB on the command ring.
+ * Check to make sure there's room on the command ring for one command TRB.
+ * Also check that there's room reserved for commands that must not fail.
+ * If this is a command that must not fail, meaning command_must_succeed = TRUE,
+ * then only check for the number of reserved spots.
+ * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
+ * because the command event handler may want to resubmit a failed command.
+ */
+static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
+		u32 field3, u32 field4, bool command_must_succeed)
 {
-	if (!room_on_ring(xhci, xhci->cmd_ring, 1)) {
+	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
+	if (!command_must_succeed)
+		reserved_trbs++;
+
+	if (!room_on_ring(xhci, xhci->cmd_ring, reserved_trbs)) {
 		if (!in_interrupt())
 			xhci_err(xhci, "ERR: No room for command on command ring\n");
+		if (command_must_succeed)
+			xhci_err(xhci, "ERR: Reserved TRB counting for "
+					"unfailable commands failed.\n");
 		return -ENOMEM;
 	}
 	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
@@ -1693,7 +1925,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 fiel
 /* Queue a no-op command on the command ring */
 static int queue_cmd_noop(struct xhci_hcd *xhci)
 {
-	return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP));
+	return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false);
 }
 
 /*
@@ -1712,7 +1944,7 @@ void *xhci_setup_one_noop(struct xhci_hcd *xhci)
 int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
 {
 	return queue_command(xhci, 0, 0, 0,
-			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id));
+			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
 }
 
 /* Queue an address device command TRB */
@@ -1721,16 +1953,28 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 {
 	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
 			upper_32_bits(in_ctx_ptr), 0,
-			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
+			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
+			false);
 }
 
 /* Queue a configure endpoint command TRB */
 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+		u32 slot_id, bool command_must_succeed)
+{
+	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+			upper_32_bits(in_ctx_ptr), 0,
+			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
+			command_must_succeed);
+}
+
+/* Queue an evaluate context command TRB */
+int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 		u32 slot_id)
 {
 	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
 			upper_32_bits(in_ctx_ptr), 0,
-			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
+			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
+			false);
 }
 
 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
@@ -1741,7 +1985,7 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
 	u32 type = TRB_TYPE(TRB_STOP_RING);
 
 	return queue_command(xhci, 0, 0, 0,
-			trb_slot_id | trb_ep_index | type);
+			trb_slot_id | trb_ep_index | type, false);
 }
 
 /* Set Transfer Ring Dequeue Pointer command.
@@ -1765,7 +2009,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 	}
 	return queue_command(xhci, lower_32_bits(addr) | cycle_state,
 			upper_32_bits(addr), 0,
-			trb_slot_id | trb_ep_index | type);
+			trb_slot_id | trb_ep_index | type, false);
 }
 
 int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
@@ -1775,5 +2019,6 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
 	u32 type = TRB_TYPE(TRB_RESET_EP);
 
-	return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type);
+	return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
+			false);
 }
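
The command wait list added in this merge (see handle_cmd_in_cmd_wait_list() above) follows a simple head-of-queue matching pattern: the submitting context queues a command TRB, adds the command to the per-device list, and waits on command->completion; the event handler then records GET_COMP_CODE(event->status) and either signals that completion or frees the command, but only when the completed TRB matches the command ring's dequeue pointer. Below is a minimal, self-contained user-space sketch of that pattern; it is not part of the patch, and struct fake_command, struct fake_device, and main() are illustrative stand-ins for the kernel's xhci_command, xhci_virt_device, and command-ring event handler.

/*
 * Minimal user-space sketch of the head-of-queue matching done by
 * handle_cmd_in_cmd_wait_list().  Not kernel code: the plain singly
 * linked list and the fake_* names are illustrative stand-ins for the
 * real xhci_command / list_head machinery.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_command {
	int command_trb;		/* stands in for the command TRB address */
	int status;			/* filled in from the completion event */
	int wants_completion;		/* stands in for command->completion being set */
	struct fake_command *next;
};

struct fake_device {
	struct fake_command *cmd_list;	/* per-device command wait list */
};

/* Return 1 and consume the head entry if it matches the ring's dequeue
 * pointer; return 0 so the generic completion path runs instead.
 */
static int handle_cmd_in_wait_list(struct fake_device *dev,
		int dequeue_trb, int comp_code)
{
	struct fake_command *cmd = dev->cmd_list;

	if (!cmd)
		return 0;
	if (cmd->command_trb != dequeue_trb)
		return 0;

	cmd->status = comp_code;
	dev->cmd_list = cmd->next;
	if (cmd->wants_completion)
		printf("waking waiter, status %d\n", cmd->status);	/* ~ complete() */
	else
		free(cmd);						/* ~ xhci_free_command() */
	return 1;
}

int main(void)
{
	struct fake_device dev = { .cmd_list = NULL };
	struct fake_command *cmd = calloc(1, sizeof(*cmd));

	if (!cmd)
		return 1;
	cmd->command_trb = 42;		/* pretend TRB 42 was queued and waited on */
	cmd->wants_completion = 1;
	dev.cmd_list = cmd;

	/* A completion for some other TRB is not ours to handle ... */
	printf("matched: %d\n", handle_cmd_in_wait_list(&dev, 7, 1));
	/* ... but a completion for the head command is consumed. */
	printf("matched: %d\n", handle_cmd_in_wait_list(&dev, 42, 1));
	free(cmd);
	return 0;
}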