Commit 4e9e9200 authored by Pete Zaitcev, committed by Greg Kroah-Hartman

USB: usbmon: end ugly tricks with DMA peeking

This patch fixes crashes when usbmon attempts to access the GART aperture.
The old code attempted to take a bus address and convert it into a
virtual address, which clearly was impossible on systems with actual
IOMMUs. Let us not persist in this foolishness, and use transfer_buffer
in all cases instead.
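
For illustration, here is a minimal sketch (not part of this patch; the
function name is made up) of why converting a bus address back into a
virtual address cannot work once an IOMMU is involved:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

static void show_why_peeking_fails(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        /*
         * On a PC without an IOMMU, "handle" happens to equal the physical
         * address of buf, so pfn_to_page(handle >> PAGE_SHIFT) finds the
         * right page and the old mon_dma.c trick worked.  Behind a GART or
         * other IOMMU, "handle" is an aperture address with no corresponding
         * struct page; dereferencing memory found that way is exactly the
         * crash this patch removes.
         */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}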

I think downsides are negligible. The ones I see are:
 - A driver may pass the address of one buffer down as transfer_buffer
   while an entirely different buffer is mapped for DMA, resulting in
   misleading usbmon output. Note, however, that PIO-based controllers
   would transfer the same data that usbmon sees here.
 - Out of tree drivers may crash usbmon if they store garbage in
   transfer_buffer. I inspected the in-tree drivers, and clarified
   the documentation in comments.
 - Drivers that use get_user_pages cannot be monitored. I only found
   one driver with this problem (drivers/staging/rspiusb).
 - The same happens with usb_storage transferring from highmem, but that
   works fine on 64-bit systems, so I think it's not a concern.
   At least we don't crash anymore (see the sketch after this list).
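
A hedged sketch of the driver-side contract (hypothetical driver code, not
from this patch): when a driver maps DMA itself it should still point
transfer_buffer at the CPU-visible copy, and if the data can only live in
highmem it should leave transfer_buffer NULL so usbmon prints 'Z' instead
of following a stale pointer.

#include <linux/usb.h>
#include <linux/slab.h>

static int example_prepare(struct usb_device *udev, unsigned int pipe,
    size_t len, usb_complete_t done)
{
        struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
        void *buf;

        if (urb == NULL)
                return -ENOMEM;

        /* The driver maps DMA itself (URB_NO_TRANSFER_DMA_MAP), but still
         * passes the CPU-visible buffer so usbmon can copy it. */
        buf = usb_buffer_alloc(udev, len, GFP_KERNEL, &urb->transfer_dma);
        if (buf == NULL) {
                usb_free_urb(urb);
                return -ENOMEM;
        }
        usb_fill_bulk_urb(urb, udev, pipe, buf, len, done, NULL);
        urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

        return usb_submit_urb(urb, GFP_KERNEL);
}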

Why didn't we do this in 2.6.10? That's because back in those days
it was popular not to fill in transfer_buffer, so almost all
traffic would be invisible (e.g. all of HID was like that).
But now, the tree is almost 100% PIO friendly, so we can do the
right thing at last.
Signed-off-by: Pete Zaitcev <zaitcev@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent f4e2332c
drivers/usb/mon/Makefile:

@@ -2,6 +2,6 @@
 # Makefile for USB monitor
 #
-usbmon-objs := mon_main.o mon_stat.o mon_text.o mon_bin.o mon_dma.o
+usbmon-objs := mon_main.o mon_stat.o mon_text.o mon_bin.o
 obj-$(CONFIG_USB_MON) += usbmon.o
drivers/usb/mon/mon_bin.c:

@@ -220,9 +220,8 @@ static void mon_free_buff(struct mon_pgmap *map, int npages);

 /*
  * This is a "chunked memcpy". It does not manipulate any counters.
- * But it returns the new offset for repeated application.
  */
-unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
+static void mon_copy_to_buff(const struct mon_reader_bin *this,
     unsigned int off, const unsigned char *from, unsigned int length)
 {
        unsigned int step_len;

@@ -247,7 +246,6 @@ unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
                from += step_len;
                length -= step_len;
        }
-       return off;
 }

 /*
@@ -400,15 +398,8 @@ static char mon_bin_get_data(const struct mon_reader_bin *rp,
     unsigned int offset, struct urb *urb, unsigned int length)
 {
-       if (urb->dev->bus->uses_dma &&
-           (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
-               mon_dmapeek_vec(rp, offset, urb->transfer_dma, length);
-               return 0;
-       }
-
        if (urb->transfer_buffer == NULL)
                return 'Z';
        mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
        return 0;
 }

@@ -635,7 +626,6 @@ static int mon_bin_open(struct inode *inode, struct file *file)
        spin_lock_init(&rp->b_lock);
        init_waitqueue_head(&rp->b_wait);
        mutex_init(&rp->fetch_lock);
        rp->b_size = BUFF_DFL;
        size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE);
drivers/usb/mon/mon_dma.c (deleted):

/*
 * The USB Monitor, inspired by Dave Harding's USBMon.
 *
 * mon_dma.c: Library which snoops on DMA areas.
 *
 * Copyright (C) 2005 Pete Zaitcev (zaitcev@redhat.com)
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <asm/page.h>

#include <linux/usb.h>  /* Only needed for declarations in usb_mon.h */
#include "usb_mon.h"

/*
 * PC-compatibles, are, fortunately, sufficiently cache-coherent for this.
 */
#if defined(__i386__) || defined(__x86_64__) /* CONFIG_ARCH_I386 doesn't exit */
#define MON_HAS_UNMAP 1

#define phys_to_page(phys)      pfn_to_page((phys) >> PAGE_SHIFT)

char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len)
{
        struct page *pg;
        unsigned long flags;
        unsigned char *map;
        unsigned char *ptr;

        /*
         * On i386, a DMA handle is the "physical" address of a page.
         * In other words, the bus address is equal to physical address.
         * There is no IOMMU.
         */
        pg = phys_to_page(dma_addr);

        /*
         * We are called from hardware IRQs in case of callbacks.
         * But we can be called from softirq or process context in case
         * of submissions. In such case, we need to protect KM_IRQ0.
         */
        local_irq_save(flags);
        map = kmap_atomic(pg, KM_IRQ0);
        ptr = map + (dma_addr & (PAGE_SIZE-1));
        memcpy(dst, ptr, len);
        kunmap_atomic(map, KM_IRQ0);
        local_irq_restore(flags);
        return 0;
}

void mon_dmapeek_vec(const struct mon_reader_bin *rp,
    unsigned int offset, dma_addr_t dma_addr, unsigned int length)
{
        unsigned long flags;
        unsigned int step_len;
        struct page *pg;
        unsigned char *map;
        unsigned long page_off, page_len;

        local_irq_save(flags);
        while (length) {
                /* compute number of bytes we are going to copy in this page */
                step_len = length;
                page_off = dma_addr & (PAGE_SIZE-1);
                page_len = PAGE_SIZE - page_off;
                if (page_len < step_len)
                        step_len = page_len;

                /* copy data and advance pointers */
                pg = phys_to_page(dma_addr);
                map = kmap_atomic(pg, KM_IRQ0);
                offset = mon_copy_to_buff(rp, offset, map + page_off, step_len);
                kunmap_atomic(map, KM_IRQ0);
                dma_addr += step_len;
                length -= step_len;
        }
        local_irq_restore(flags);
}

#endif /* __i386__ */

#ifndef MON_HAS_UNMAP
char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len)
{
        return 'D';
}

void mon_dmapeek_vec(const struct mon_reader_bin *rp,
    unsigned int offset, dma_addr_t dma_addr, unsigned int length)
{
        ;
}
#endif /* MON_HAS_UNMAP */
drivers/usb/mon/mon_main.c:

@@ -361,7 +361,6 @@ static int __init mon_init(void)
        }
        // MOD_INC_USE_COUNT(which_module?);
        mutex_lock(&usb_bus_list_lock);
        list_for_each_entry (ubus, &usb_bus_list, bus_list) {
                mon_bus_init(ubus);
drivers/usb/mon/mon_text.c:

@@ -150,20 +150,6 @@ static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb,
                return '>';
        }

-       /*
-        * The check to see if it's safe to poke at data has an enormous
-        * number of corner cases, but it seems that the following is
-        * more or less safe.
-        *
-        * We do not even try to look at transfer_buffer, because it can
-        * contain non-NULL garbage in case the upper level promised to
-        * set DMA for the HCD.
-        */
-       if (urb->dev->bus->uses_dma &&
-           (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
-               return mon_dmapeek(ep->data, urb->transfer_dma, len);
-       }
-
        if (urb->transfer_buffer == NULL)
                return 'Z';     /* '0' would be not as pretty. */
drivers/usb/mon/usb_mon.h:

@@ -64,20 +64,6 @@ void mon_text_exit(void);
 int __init mon_bin_init(void);
 void mon_bin_exit(void);

-/*
- * DMA interface.
- *
- * XXX The vectored side needs a serious re-thinking. Abstracting vectors,
- * like in Paolo's original patch, produces a double pkmap. We need an idea.
- */
-extern char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len);
-
-struct mon_reader_bin;
-extern void mon_dmapeek_vec(const struct mon_reader_bin *rp,
-    unsigned int offset, dma_addr_t dma_addr, unsigned int len);
-extern unsigned int mon_copy_to_buff(const struct mon_reader_bin *rp,
-    unsigned int offset, const unsigned char *from, unsigned int len);
-
 /*
  */
 extern struct mutex mon_lock;
include/linux/usb.h:

@@ -1036,9 +1036,10 @@ typedef void (*usb_complete_t)(struct urb *);
  * @transfer_flags: A variety of flags may be used to affect how URB
  *      submission, unlinking, or operation are handled.  Different
  *      kinds of URB can use different flags.
- * @transfer_buffer: This identifies the buffer to (or from) which
- *      the I/O request will be performed (unless URB_NO_TRANSFER_DMA_MAP
- *      is set).  This buffer must be suitable for DMA; allocate it with
+ * @transfer_buffer: This identifies the buffer to (or from) which the I/O
+ *      request will be performed unless URB_NO_TRANSFER_DMA_MAP is set
+ *      (however, do not leave garbage in transfer_buffer even then).
+ *      This buffer must be suitable for DMA; allocate it with
  *      kmalloc() or equivalent.  For transfers to "in" endpoints, contents
  *      of this buffer will be modified.  This buffer is used for the data
  *      stage of control transfers.

@@ -1104,9 +1105,15 @@ typedef void (*usb_complete_t)(struct urb *);
  * allocate a DMA buffer with usb_buffer_alloc() or call usb_buffer_map().
  * When these transfer flags are provided, host controller drivers will
  * attempt to use the dma addresses found in the transfer_dma and/or
- * setup_dma fields rather than determining a dma address themselves.  (Note
- * that transfer_buffer and setup_packet must still be set because not all
- * host controllers use DMA, nor do virtual root hubs).
+ * setup_dma fields rather than determining a dma address themselves.
+ *
+ * Note that transfer_buffer must still be set if the controller
+ * does not support DMA (as indicated by bus.uses_dma) and when talking
+ * to the root hub. If you have to transfer between a highmem zone and the
+ * device on such a controller, create a bounce buffer or bail out with an
+ * error. If transfer_buffer cannot be set (is in highmem) and the controller
+ * is DMA capable, assign NULL to it, so that usbmon knows not to use the
+ * value. The setup_packet must always be set, so it cannot be located in highmem.
 *
 * Initialization:
 *
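
The bounce-buffer advice above can be illustrated with a short, hypothetical
helper (names made up; assumes the fragment fits within one page): a PIO-only
controller, like usbmon itself, reads urb->transfer_buffer with the CPU, so
data living in highmem has to be copied into a kmalloc'ed buffer first.

#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>

static void *example_bounce_from_highmem(struct page *pg, unsigned int off,
    size_t len)
{
        void *bounce = kmalloc(len, GFP_KERNEL);
        void *src;

        if (bounce == NULL)
                return NULL;
        /* Give the highmem page a temporary kernel mapping and copy out. */
        src = kmap(pg);
        memcpy(bounce, src + off, len);
        kunmap(pg);
        return bounce;          /* caller uses this as urb->transfer_buffer */
}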