Commit ab565f7e authored by Greg Kroah-Hartman

Merge tag 'fixes-for-v5.9-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb into usb-linus

Felipe writes:

USB: fixes for v5.9-rc

Three ZLP fixes on dwc3 and a resource leak fix on the TCM gadget
Signed-off-by: Felipe Balbi <balbi@kernel.org>

* tag 'fixes-for-v5.9-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb:
  usb: dwc3: gadget: Handle ZLP for sg requests
  usb: dwc3: gadget: Fix handling ZLP
  usb: dwc3: gadget: Don't setup more than requested
  usb: gadget: f_tcm: Fix some resource leaks in some error paths
parents d5643d22 bc9a2e22
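
For context on what these fixes address: a zero-length packet (ZLP) terminates a USB transfer whose length is an exact multiple of wMaxPacketSize, and a gadget function driver asks for one by setting the usb_request "zero" flag. A minimal illustrative sketch, not part of this series (the function and buffer names below are hypothetical):

#include <linux/usb/gadget.h>

/* Queue an IN transfer and ask the UDC for a terminating ZLP when needed. */
static int queue_in_data(struct usb_ep *ep, struct usb_request *req,
                         void *buf, unsigned int len)
{
        req->buf = buf;
        req->length = len;
        req->zero = 1;  /* send a ZLP if len is an exact multiple of maxpacket */

        return usb_ep_queue(ep, req, GFP_ATOMIC);
}

It is then the UDC driver's job (dwc3 here) to turn that flag into an extra zero-length TRB, which is what the three dwc3 patches below correct.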
drivers/usb/dwc3/gadget.c

@@ -1054,27 +1054,25 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
  * dwc3_prepare_one_trb - setup one TRB from one request
  * @dep: endpoint for which this request is prepared
  * @req: dwc3_request pointer
+ * @trb_length: buffer size of the TRB
  * @chain: should this TRB be chained to the next?
  * @node: only for isochronous endpoints. First TRB needs different type.
  */
 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
-		struct dwc3_request *req, unsigned chain, unsigned node)
+		struct dwc3_request *req, unsigned int trb_length,
+		unsigned chain, unsigned node)
 {
 	struct dwc3_trb *trb;
-	unsigned int length;
 	dma_addr_t dma;
 	unsigned stream_id = req->request.stream_id;
 	unsigned short_not_ok = req->request.short_not_ok;
 	unsigned no_interrupt = req->request.no_interrupt;
 	unsigned is_last = req->request.is_last;
 
-	if (req->request.num_sgs > 0) {
-		length = sg_dma_len(req->start_sg);
+	if (req->request.num_sgs > 0)
 		dma = sg_dma_address(req->start_sg);
-	} else {
-		length = req->request.length;
+	else
 		dma = req->request.dma;
-	}
 
 	trb = &dep->trb_pool[dep->trb_enqueue];

@@ -1086,7 +1084,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
 	req->num_trbs++;
 
-	__dwc3_prepare_one_trb(dep, trb, dma, length, chain, node,
+	__dwc3_prepare_one_trb(dep, trb, dma, trb_length, chain, node,
 			stream_id, short_not_ok, no_interrupt, is_last);
 }
@@ -1096,16 +1094,27 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
 	struct scatterlist *sg = req->start_sg;
 	struct scatterlist *s;
 	int i;
+	unsigned int length = req->request.length;
 	unsigned int remaining = req->request.num_mapped_sgs
 		- req->num_queued_sgs;
 
+	/*
+	 * If we resume preparing the request, then get the remaining length of
+	 * the request and resume where we left off.
+	 */
+	for_each_sg(req->request.sg, s, req->num_queued_sgs, i)
+		length -= sg_dma_len(s);
+
 	for_each_sg(sg, s, remaining, i) {
-		unsigned int length = req->request.length;
 		unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
 		unsigned int rem = length % maxp;
+		unsigned int trb_length;
 		unsigned chain = true;
 
+		trb_length = min_t(unsigned int, length, sg_dma_len(s));
+		length -= trb_length;
+
 		/*
 		 * IOMMU driver is coalescing the list of sgs which shares a
 		 * page boundary into one and giving it to USB driver. With

@@ -1113,7 +1122,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
 		 * sgs passed. So mark the chain bit to false if it is the last
 		 * mapped sg.
 		 */
-		if (i == remaining - 1)
+		if ((i == remaining - 1) || !length)
 			chain = false;
 
 		if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {

@@ -1123,7 +1132,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
 			req->needs_extra_trb = true;
 
 			/* prepare normal TRB */
-			dwc3_prepare_one_trb(dep, req, true, i);
+			dwc3_prepare_one_trb(dep, req, trb_length, true, i);
 
 			/* Now prepare one extra TRB to align transfer size */
 			trb = &dep->trb_pool[dep->trb_enqueue];

@@ -1134,8 +1143,39 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
 					req->request.short_not_ok,
 					req->request.no_interrupt,
 					req->request.is_last);
+		} else if (req->request.zero && req->request.length &&
+			   !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+			   !rem && !chain) {
+			struct dwc3 *dwc = dep->dwc;
+			struct dwc3_trb *trb;
+
+			req->needs_extra_trb = true;
+
+			/* Prepare normal TRB */
+			dwc3_prepare_one_trb(dep, req, trb_length, true, i);
+
+			/* Prepare one extra TRB to handle ZLP */
+			trb = &dep->trb_pool[dep->trb_enqueue];
+			req->num_trbs++;
+			__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
+					       !req->direction, 1,
+					       req->request.stream_id,
+					       req->request.short_not_ok,
+					       req->request.no_interrupt,
+					       req->request.is_last);
+
+			/* Prepare one more TRB to handle MPS alignment */
+			if (!req->direction) {
+				trb = &dep->trb_pool[dep->trb_enqueue];
+				req->num_trbs++;
+				__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp,
+						       false, 1, req->request.stream_id,
+						       req->request.short_not_ok,
+						       req->request.no_interrupt,
+						       req->request.is_last);
+			}
 		} else {
-			dwc3_prepare_one_trb(dep, req, chain, i);
+			dwc3_prepare_one_trb(dep, req, trb_length, chain, i);
 		}
 
 		/*

@@ -1150,6 +1190,16 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
 		req->num_queued_sgs++;
 
+		/*
+		 * The number of pending SG entries may not correspond to the
+		 * number of mapped SG entries. If all the data are queued, then
+		 * don't include unused SG entries.
+		 */
+		if (length == 0) {
+			req->num_pending_sgs -= req->request.num_mapped_sgs - req->num_queued_sgs;
+			break;
+		}
+
 		if (!dwc3_calc_trbs_left(dep))
 			break;
 	}
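
The bookkeeping introduced in the sg path can be restated on its own: the remaining length is the request length minus everything already queued from earlier sg entries, and each new TRB is capped at min(remaining, sg_dma_len(sg)) so no more than the requested length is programmed into the hardware. An illustrative, standalone sketch under those assumptions (these helpers are not kernel functions):

#include <linux/kernel.h>
#include <linux/scatterlist.h>

/* Length still to be queued after num_queued_sgs entries were consumed. */
static unsigned int remaining_length(unsigned int req_length,
                                     struct scatterlist *sgl,
                                     unsigned int num_queued_sgs)
{
        struct scatterlist *s;
        unsigned int length = req_length;
        int i;

        for_each_sg(sgl, s, num_queued_sgs, i)
                length -= sg_dma_len(s);

        return length;
}

/* A TRB never carries more than what the request still asks for. */
static unsigned int trb_len(unsigned int remaining, struct scatterlist *s)
{
        return min_t(unsigned int, remaining, sg_dma_len(s));
}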
@@ -1169,7 +1219,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
 		req->needs_extra_trb = true;
 
 		/* prepare normal TRB */
-		dwc3_prepare_one_trb(dep, req, true, 0);
+		dwc3_prepare_one_trb(dep, req, length, true, 0);
 
 		/* Now prepare one extra TRB to align transfer size */
 		trb = &dep->trb_pool[dep->trb_enqueue];

@@ -1180,6 +1230,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
 				req->request.no_interrupt,
 				req->request.is_last);
 	} else if (req->request.zero && req->request.length &&
+		   !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
 		   (IS_ALIGNED(req->request.length, maxp))) {
 		struct dwc3 *dwc = dep->dwc;
 		struct dwc3_trb *trb;

@@ -1187,18 +1238,29 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
 		req->needs_extra_trb = true;
 
 		/* prepare normal TRB */
-		dwc3_prepare_one_trb(dep, req, true, 0);
+		dwc3_prepare_one_trb(dep, req, length, true, 0);
 
-		/* Now prepare one extra TRB to handle ZLP */
+		/* Prepare one extra TRB to handle ZLP */
 		trb = &dep->trb_pool[dep->trb_enqueue];
 		req->num_trbs++;
 		__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
-				false, 1, req->request.stream_id,
+				!req->direction, 1, req->request.stream_id,
 				req->request.short_not_ok,
 				req->request.no_interrupt,
 				req->request.is_last);
+
+		/* Prepare one more TRB to handle MPS alignment for OUT */
+		if (!req->direction) {
+			trb = &dep->trb_pool[dep->trb_enqueue];
+			req->num_trbs++;
+			__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp,
+					false, 1, req->request.stream_id,
+					req->request.short_not_ok,
+					req->request.no_interrupt,
+					req->request.is_last);
+		}
 	} else {
-		dwc3_prepare_one_trb(dep, req, false, 0);
+		dwc3_prepare_one_trb(dep, req, length, false, 0);
 	}
 }
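
Both the sg and the linear paths gate the extra ZLP TRB on the same condition. Roughly, as an illustrative sketch (the helper name is made up, not from the patch):

#include <linux/kernel.h>
#include <linux/usb/gadget.h>

/*
 * A ZLP is only queued when the function driver asked for one and the
 * transfer length is a non-zero, exact multiple of wMaxPacketSize;
 * isochronous endpoints are excluded by the patches above.
 */
static bool needs_zlp(const struct usb_request *req, unsigned int maxp)
{
        return req->zero && req->length && IS_ALIGNED(req->length, maxp);
}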
@@ -2671,8 +2733,17 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
 			status);
 
 	if (req->needs_extra_trb) {
+		unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
+
 		ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
 				status);
+
+		/* Reclaim MPS padding TRB for ZLP */
+		if (!req->direction && req->request.zero && req->request.length &&
+		    !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+		    (IS_ALIGNED(req->request.length, maxp)))
+			ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status);
+
 		req->needs_extra_trb = false;
 	}
drivers/usb/gadget/function/f_tcm.c

@@ -753,12 +753,13 @@ static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
 		goto err_sts;
 
 	return 0;
 
 err_sts:
-	usb_ep_free_request(fu->ep_status, stream->req_status);
-	stream->req_status = NULL;
-err_out:
 	usb_ep_free_request(fu->ep_out, stream->req_out);
 	stream->req_out = NULL;
+err_out:
+	usb_ep_free_request(fu->ep_in, stream->req_in);
+	stream->req_in = NULL;
 out:
 	return -ENOMEM;
 }
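
The f_tcm change restores the usual kernel error-unwind idiom: each label frees only what was successfully allocated before the failing step, in reverse order of allocation. A generic sketch of the pattern, with made-up names and a hypothetical struct ctx, not the f_tcm code itself:

#include <linux/slab.h>

struct ctx {
        void *a;
        void *b;
        void *c;
};

static int alloc_three(struct ctx *c)
{
        c->a = kzalloc(16, GFP_KERNEL);
        if (!c->a)
                goto out;

        c->b = kzalloc(16, GFP_KERNEL);
        if (!c->b)
                goto err_a;

        c->c = kzalloc(16, GFP_KERNEL);
        if (!c->c)
                goto err_b;

        return 0;

err_b:  /* third allocation failed: undo the second, fall through */
        kfree(c->b);
        c->b = NULL;
err_a:  /* second allocation failed: undo the first */
        kfree(c->a);
        c->a = NULL;
out:
        return -ENOMEM;
}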