author     Mathias Nyman <mathias.nyman@linux.intel.com>  2016-06-21 10:58:02 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2016-06-26 11:43:39 -0700
commit     f9c589e142d04b8a19eb382162f804d17102b5ed (patch)
tree       89b9be15971379d0e6d8655138322239c02f8df9 /drivers/usb/host/xhci.h
parent     474ed23a6257b552ab48585c1511eac98653b4e8 (diff)
xhci: TD-fragment, align the unsplittable case with a bounce buffer
If the last trb before a link trb is not packet size aligned and cannot be
split, use a bounce buffer for the chunk of data that cannot be aligned to
the max packet size.
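
A minimal sketch of that alignment decision, using a hypothetical helper
(the actual enqueue logic lives in the xhci ring code, not in this header):

#include <linux/usb.h>

/*
 * Sketch only, not the driver code: decide whether the chunk that ends at
 * the link trb needs the bounce buffer.  enqd_len is how much of the TD is
 * already enqueued, trb_buff_len the length of the final chunk before the
 * link trb.
 */
static bool td_chunk_needs_bounce(struct urb *urb, u32 enqd_len,
				  u32 trb_buff_len)
{
	u32 max_pkt = usb_endpoint_maxp(&urb->ep->desc);
	u32 unalign = (enqd_len + trb_buff_len) % max_pkt;

	/* Already packet size aligned, or the whole TD ends here */
	if (unalign == 0 ||
	    enqd_len + trb_buff_len == urb->transfer_buffer_length)
		return false;

	/* Splittable: the chunk can simply be shortened to a packet boundary */
	if (trb_buff_len > unalign)
		return false;

	/* Unsplittable and unaligned: use the segment bounce buffer */
	return true;
}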

Allocate a max packet size bounce buffer for every segment of a bulk
endpoint ring at the same time as allocating the ring.
If we need to align the data before the link trb in that segment, copy the
data to the segment's bounce buffer, dma map it, and enqueue it.
Once the td finishes, or is cancelled, unmap it.
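
For the OUT direction this could look roughly like the sketch below; the
wrapper names are hypothetical, while bounce_buf, bounce_dma, bounce_offs
and bounce_len are the xhci_segment fields added by this patch:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/usb.h>
#include "xhci.h"

/*
 * Sketch only: copy the unalignable chunk of an OUT transfer into the
 * segment bounce buffer and map it for DMA.  enqd_len is the offset of the
 * chunk within the URB, new_buff_len its length.
 */
static int bounce_map_out(struct device *dev, struct urb *urb,
			  struct xhci_segment *seg, u32 enqd_len,
			  u32 new_buff_len)
{
	/* Gather the chunk out of the URB's scatterlist */
	sg_pcopy_to_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
			   new_buff_len, enqd_len);

	seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
					 new_buff_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, seg->bounce_dma))
		return -ENOMEM;

	seg->bounce_len = new_buff_len;
	seg->bounce_offs = enqd_len;
	return 0;
}

/* Sketch only: undo the mapping once the td completes or is cancelled */
static void bounce_unmap_out(struct device *dev, struct xhci_segment *seg)
{
	dma_unmap_single(dev, seg->bounce_dma, seg->bounce_len,
			 DMA_TO_DEVICE);
	seg->bounce_len = 0;
	seg->bounce_offs = 0;
}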

For IN transfers we need to first map the bounce buffer and then queue it;
after the transfer finishes, copy the bounce buffer back to the original sg
list and finally unmap it.
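
A sketch of the IN completion path under the same assumptions, unmapping
before the copy so the CPU view of the buffer is coherent:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/usb.h>
#include "xhci.h"

/*
 * Sketch only: finish an IN transfer that used the bounce buffer.  Unmap
 * first, then scatter the received bytes back into the URB's sg list at
 * the original offset.
 */
static void bounce_finish_in(struct device *dev, struct urb *urb,
			     struct xhci_segment *seg)
{
	size_t copied;

	dma_unmap_single(dev, seg->bounce_dma, seg->bounce_len,
			 DMA_FROM_DEVICE);

	copied = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
				      seg->bounce_len, seg->bounce_offs);
	if (copied != seg->bounce_len)
		dev_warn(dev, "bounce buffer copy back short: %zu/%u bytes\n",
			 copied, seg->bounce_len);

	seg->bounce_len = 0;
	seg->bounce_offs = 0;
}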

Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/usb/host/xhci.h')
-rw-r--r--  drivers/usb/host/xhci.h  10
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index b0b8d0f8791a..b2c1dc5dc0f3 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1347,6 +1347,11 @@ struct xhci_segment {
 	/* private to HCD */
 	struct xhci_segment	*next;
 	dma_addr_t		dma;
+	/* Max packet sized bounce buffer for td-fragment alignment */
+	dma_addr_t		bounce_dma;
+	void			*bounce_buf;
+	unsigned int		bounce_offs;
+	unsigned int		bounce_len;
 };
 
 struct xhci_td {
@@ -1356,6 +1361,7 @@ struct xhci_td {
 	struct xhci_segment	*start_seg;
 	union xhci_trb		*first_trb;
 	union xhci_trb		*last_trb;
+	struct xhci_segment	*bounce_seg;
 	/* actual_length of the URB has already been set */
 	bool			urb_length_set;
 };
@@ -1405,6 +1411,7 @@ struct xhci_ring {
 	unsigned int		num_segs;
 	unsigned int		num_trbs_free;
 	unsigned int		num_trbs_free_temp;
+	unsigned int		bounce_buf_len;
 	enum xhci_ring_type	type;
 	bool			last_td_was_short;
 	struct radix_tree_root	*trb_address_map;
@@ -1807,7 +1814,8 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
 		unsigned int ep_index);
 struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 		unsigned int num_stream_ctxs,
-		unsigned int num_streams, gfp_t flags);
+		unsigned int num_streams,
+		unsigned int max_packet, gfp_t flags);
 void xhci_free_stream_info(struct xhci_hcd *xhci,
 		struct xhci_stream_info *stream_info);
 void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,