Commit 5a83f04a authored by Mathias Nyman, committed by Greg Kroah-Hartman

xhci: properly prepare zero packet TD after normal bulk TD.

If a zero-length packet is needed after a bulk transfer, an additional
zero-length TD was prepared before the bulk transfer was enqueued. This
set up the zero-packet TD structure with incorrect td->start_seg and
td->first_trb pointers.

Prepare the zero-length TD after the data bulk TD is enqueued instead;
this sets the pointers correctly.

This change also removes unnecessary complexity around keeping track of
the last TRB while enqueuing TRBs.
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 5a5a0b1a
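
Background for the fix above: prepare_transfer() snapshots the ring's current
enqueue position into the TD it sets up (td->start_seg, td->first_trb), so a
zero-length TD prepared before the data TRBs are written records the very slots
the data TD is about to consume. Below is a minimal standalone C sketch of that
mechanism, not the kernel's actual code; the trimmed structs and the
prepare_td() helper are illustrative stand-ins for struct xhci_ring, struct
xhci_td, and prepare_transfer().

#include <stdio.h>

struct trb {
        unsigned int field[4];          /* a TRB is four 32-bit words */
};

struct ring {
        struct trb trbs[8];
        struct trb *enqueue;            /* next TRB slot to be written */
};

struct td {
        struct trb *first_trb;          /* captured from ring->enqueue */
};

/* Stand-in for prepare_transfer(): records where the TD will start. */
static void prepare_td(struct ring *ring, struct td *td)
{
        td->first_trb = ring->enqueue;
}

int main(void)
{
        struct ring ring = { .enqueue = ring.trbs };
        struct td data_td, zero_td;

        /* Old ordering: the zero TD is prepared before the data TRBs are
         * queued, so both TDs claim to start at trbs[0]. */
        prepare_td(&ring, &data_td);
        prepare_td(&ring, &zero_td);
        ring.enqueue += 3;                      /* enqueue three data TRBs */
        printf("old ordering: zero TD first_trb = trbs[%td]\n",
               zero_td.first_trb - ring.trbs);  /* prints 0: stale */

        /* New ordering: prepare the zero TD only after the data TD. */
        prepare_td(&ring, &zero_td);
        printf("new ordering: zero TD first_trb = trbs[%td]\n",
               zero_td.first_trb - ring.trbs);  /* prints 3: correct */
        return 0;
}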
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3125,8 +3125,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         struct xhci_td *td;
         struct xhci_generic_trb *start_trb;
         struct scatterlist *sg = NULL;
-        bool more_trbs_coming;
-        bool zero_length_needed;
+        bool more_trbs_coming = true;
+        bool need_zero_pkt = false;
         unsigned int num_trbs, last_trb_num, i;
         unsigned int start_cycle, num_sgs = 0;
         unsigned int running_total, block_len, trb_buff_len, full_len;
@@ -3157,17 +3157,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         last_trb_num = num_trbs - 1;
 
         /* Deal with URB_ZERO_PACKET - need one more td/trb */
-        zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
-                zero_length_needed;
-        if (zero_length_needed) {
-                num_trbs++;
-                xhci_dbg(xhci, "Creating zero length td.\n");
-                ret = prepare_transfer(xhci, xhci->devs[slot_id],
-                                ep_index, urb->stream_id,
-                                1, urb, 1, mem_flags);
-                if (unlikely(ret < 0))
-                        return ret;
-        }
+        if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->length > 1)
+                need_zero_pkt = true;
 
         td = urb_priv->td[0];
 
@@ -3225,12 +3216,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                         field |= TRB_CHAIN;
                 } else {
                         field |= TRB_IOC;
-                        if (i == last_trb_num)
-                                td->last_trb = ring->enqueue;
-                        else if (zero_length_needed) {
-                                trb_buff_len = 0;
-                                urb_priv->td[1]->last_trb = ring->enqueue;
-                        }
+                        more_trbs_coming = need_zero_pkt;
+                        td->last_trb = ring->enqueue;
                 }
 
                 /* Only set interrupt on short packet for IN endpoints */
@@ -3246,10 +3233,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                         TRB_TD_SIZE(remainder) |
                         TRB_INTR_TARGET(0);
 
-                if (i < num_trbs - 1)
-                        more_trbs_coming = true;
-                else
-                        more_trbs_coming = false;
                 queue_trb(xhci, ring, more_trbs_coming,
                                 lower_32_bits(addr),
                                 upper_32_bits(addr),
@@ -3271,6 +3254,15 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                 }
         }
 
+        if (need_zero_pkt) {
+                ret = prepare_transfer(xhci, xhci->devs[slot_id],
+                                       ep_index, urb->stream_id,
+                                       1, urb, 1, mem_flags);
+                urb_priv->td[1]->last_trb = ring->enqueue;
+                field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
+                queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
+        }
+
         check_trb_math(urb, running_total);
         giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
                         start_cycle, start_trb);
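
A note on the simplification: more_trbs_coming is now initialized to true and
assigned need_zero_pkt only on the final data TRB, replacing the per-iteration
recalculation removed above. As I read the surrounding ring code, this keeps
the chain handling across any link TRB correct when a zero-length TD still has
to follow the data TD, and it degenerates to the old false value when no zero
packet is needed; the zero-length TD's own queue_trb() call then passes 0 for
more_trbs_coming, since it is the last TD of the URB.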