Commit 7b65fe12 authored by Christoph Hellwig, committed by Greg Kroah-Hartman

usb: remove commented out dma wrappers

These wrappers have never seen use and have been commented out
for a long time.  Remove them for good.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20190903084615.19161-6-hch@lst.de
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent ac2658e0
@@ -933,228 +933,6 @@ void usb_free_coherent(struct usb_device *dev, size_t size, void *addr,
}
EXPORT_SYMBOL_GPL(usb_free_coherent);
/**
* usb_buffer_map - create DMA mapping(s) for an urb
* @urb: urb whose transfer_buffer/setup_packet will be mapped
*
* URB_NO_TRANSFER_DMA_MAP is added to urb->transfer_flags if the operation
* succeeds. If the device is connected to this system through a non-DMA
* controller, this operation always succeeds.
*
* This call would normally be used for an urb which is reused, perhaps
* as the target of a large periodic transfer, with usb_buffer_dmasync()
* calls to synchronize memory and dma state.
*
* Reverse the effect of this call with usb_buffer_unmap().
*
* Return: Either %NULL (indicating no buffer could be mapped), or @urb.
*
*/
#if 0
struct urb *usb_buffer_map(struct urb *urb)
{
	struct usb_bus *bus;
	struct device *controller;

	if (!urb
			|| !urb->dev
			|| !(bus = urb->dev->bus)
			|| !(controller = bus->sysdev))
		return NULL;

	if (controller->dma_mask) {
		urb->transfer_dma = dma_map_single(controller,
			urb->transfer_buffer, urb->transfer_buffer_length,
			usb_pipein(urb->pipe)
				? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	/* FIXME generic api broken like pci, can't report errors */
	/* if (urb->transfer_dma == DMA_ADDR_INVALID) return 0; */
	} else
		urb->transfer_dma = ~0;
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	return urb;
}
EXPORT_SYMBOL_GPL(usb_buffer_map);
#endif /* 0 */
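
For context, a hypothetical caller of the wrapper above (paired with usb_buffer_unmap() below) would have looked roughly like the sketch that follows. The helper names example_submit_mapped_urb() and example_teardown() are invented for illustration, and since these wrappers never had in-tree users, no real driver ever contained such code; it assumes the usual declarations from <linux/usb.h>.

static int example_submit_mapped_urb(struct urb *urb)
{
	/* map transfer_buffer once; sets URB_NO_TRANSFER_DMA_MAP on success */
	if (!usb_buffer_map(urb))
		return -ENOMEM;

	/* the urb can now be submitted (and later resubmitted) without
	 * per-I/O mapping work in the USB core */
	return usb_submit_urb(urb, GFP_KERNEL);
}

static void example_teardown(struct urb *urb)
{
	usb_kill_urb(urb);
	usb_buffer_unmap(urb);	/* clears URB_NO_TRANSFER_DMA_MAP */
}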
/* XXX DISABLED, no users currently. If you wish to re-enable this
* XXX please determine whether the sync is to transfer ownership of
* XXX the buffer from device to cpu or vice versa, and thusly use the
* XXX appropriate _for_{cpu,device}() method. -DaveM
*/
#if 0
/**
* usb_buffer_dmasync - synchronize DMA and CPU view of buffer(s)
* @urb: urb whose transfer_buffer/setup_packet will be synchronized
*/
void usb_buffer_dmasync(struct urb *urb)
{
	struct usb_bus *bus;
	struct device *controller;

	if (!urb
			|| !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
			|| !urb->dev
			|| !(bus = urb->dev->bus)
			|| !(controller = bus->sysdev))
		return;

	if (controller->dma_mask) {
		dma_sync_single_for_cpu(controller,
			urb->transfer_dma, urb->transfer_buffer_length,
			usb_pipein(urb->pipe)
				? DMA_FROM_DEVICE : DMA_TO_DEVICE);
		if (usb_pipecontrol(urb->pipe))
			dma_sync_single_for_cpu(controller,
				urb->setup_dma,
				sizeof(struct usb_ctrlrequest),
				DMA_TO_DEVICE);
	}
}
EXPORT_SYMBOL_GPL(usb_buffer_dmasync);
#endif
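
The ownership question in DaveM's note maps onto the generic DMA API roughly as sketched below. This fragment never lived in this file and the example_reuse_buffer() name is invented; it only illustrates the standard dma_sync_single_for_{cpu,device}() pairing from <linux/dma-mapping.h> that a re-enabled wrapper would have to use.

static void example_reuse_buffer(struct device *controller, dma_addr_t dma,
				 size_t len)
{
	/* hand the buffer to the CPU so it may read what the device wrote */
	dma_sync_single_for_cpu(controller, dma, len, DMA_FROM_DEVICE);

	/* ... CPU inspects or rewrites the buffer here ... */

	/* hand ownership back to the device before the next transfer */
	dma_sync_single_for_device(controller, dma, len, DMA_FROM_DEVICE);
}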
/**
* usb_buffer_unmap - free DMA mapping(s) for an urb
* @urb: urb whose transfer_buffer will be unmapped
*
* Reverses the effect of usb_buffer_map().
*/
#if 0
void usb_buffer_unmap(struct urb *urb)
{
	struct usb_bus *bus;
	struct device *controller;

	if (!urb
			|| !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
			|| !urb->dev
			|| !(bus = urb->dev->bus)
			|| !(controller = bus->sysdev))
		return;

	if (controller->dma_mask) {
		dma_unmap_single(controller,
			urb->transfer_dma, urb->transfer_buffer_length,
			usb_pipein(urb->pipe)
				? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
	urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
}
EXPORT_SYMBOL_GPL(usb_buffer_unmap);
#endif /* 0 */
#if 0
/**
* usb_buffer_map_sg - create scatterlist DMA mapping(s) for an endpoint
* @dev: device to which the scatterlist will be mapped
* @is_in: mapping transfer direction
* @sg: the scatterlist to map
* @nents: the number of entries in the scatterlist
*
* Return: Either < 0 (indicating no buffers could be mapped), or the
* number of DMA mapping array entries in the scatterlist.
*
* Note:
* The caller is responsible for placing the resulting DMA addresses from
* the scatterlist into URB transfer buffer pointers, and for setting the
* URB_NO_TRANSFER_DMA_MAP transfer flag in each of those URBs.
*
* Top I/O rates come from queuing URBs, instead of waiting for each one
* to complete before starting the next I/O. This is particularly easy
* to do with scatterlists. Just allocate and submit one URB for each DMA
* mapping entry returned, stopping on the first error or when all succeed.
* Better yet, use the usb_sg_*() calls, which do that (and more) for you.
*
* This call would normally be used when translating scatterlist requests,
* rather than usb_buffer_map(), since on some hardware (with IOMMUs) it
* may be able to coalesce mappings for improved I/O efficiency.
*
* Reverse the effect of this call with usb_buffer_unmap_sg().
*/
int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
		      struct scatterlist *sg, int nents)
{
	struct usb_bus *bus;
	struct device *controller;

	if (!dev
			|| !(bus = dev->bus)
			|| !(controller = bus->sysdev)
			|| !controller->dma_mask)
		return -EINVAL;

	/* FIXME generic api broken like pci, can't report errors */
	return dma_map_sg(controller, sg, nents,
			is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE) ? : -ENOMEM;
}
EXPORT_SYMBOL_GPL(usb_buffer_map_sg);
#endif
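
A hypothetical use of usb_buffer_map_sg(), following the kernel-doc above (one urb per returned DMA entry), might have looked like the sketch below; example_map_and_queue() is an invented name and, as with the other wrappers, no in-tree driver ever did this. The reverse call, usb_buffer_unmap_sg() further down, would undo the mapping once all urbs had completed.

static int example_map_and_queue(struct usb_device *udev, int is_in,
				 struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i, n;

	n = usb_buffer_map_sg(udev, is_in, sg, nents);
	if (n < 0)
		return n;	/* nothing was mapped */

	for_each_sg(sg, s, n, i) {
		/* sg_dma_address(s) and sg_dma_length(s) would be copied
		 * into urb->transfer_dma / transfer_buffer_length of a
		 * freshly allocated urb, with URB_NO_TRANSFER_DMA_MAP set,
		 * before usb_submit_urb() */
	}
	return 0;
}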
/* XXX DISABLED, no users currently. If you wish to re-enable this
* XXX please determine whether the sync is to transfer ownership of
* XXX the buffer from device to cpu or vice versa, and thusly use the
* XXX appropriate _for_{cpu,device}() method. -DaveM
*/
#if 0
/**
* usb_buffer_dmasync_sg - synchronize DMA and CPU view of scatterlist buffer(s)
* @dev: device to which the scatterlist will be mapped
* @is_in: mapping transfer direction
* @sg: the scatterlist to synchronize
* @n_hw_ents: the positive return value from usb_buffer_map_sg
*
* Use this when you are re-using a scatterlist's data buffers for
* another USB request.
*/
void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
			   struct scatterlist *sg, int n_hw_ents)
{
	struct usb_bus *bus;
	struct device *controller;

	if (!dev
			|| !(bus = dev->bus)
			|| !(controller = bus->sysdev)
			|| !controller->dma_mask)
		return;

	dma_sync_sg_for_cpu(controller, sg, n_hw_ents,
			    is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(usb_buffer_dmasync_sg);
#endif
#if 0
/**
* usb_buffer_unmap_sg - free DMA mapping(s) for a scatterlist
* @dev: device to which the scatterlist will be mapped
* @is_in: mapping transfer direction
* @sg: the scatterlist to unmap
* @n_hw_ents: the positive return value from usb_buffer_map_sg
*
* Reverses the effect of usb_buffer_map_sg().
*/
void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
			 struct scatterlist *sg, int n_hw_ents)
{
	struct usb_bus *bus;
	struct device *controller;

	if (!dev
			|| !(bus = dev->bus)
			|| !(controller = bus->sysdev)
			|| !controller->dma_mask)
		return;

	dma_unmap_sg(controller, sg, n_hw_ents,
		     is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(usb_buffer_unmap_sg);
#endif
/*
* Notifications of device and interface registration
*/