Commit 0b6c4857 authored by Stefan Richter

firewire: core: fix DMA mapping direction

Seen with recent libdc1394:  If a client mmap()s the buffer of an
isochronous reception context with PROT_READ|PROT_WRITE instead of just
PROT_READ, firewire-core sets the wrong DMA mapping direction during
buffer initialization.

The fix is to split fw_iso_buffer_init() into allocation and DMA
mapping and to perform the latter only after both buffer and DMA
context have been allocated.  Buffer allocation and context creation
may happen in either order, but the context type (reception or
transmission) must be known before the buffer's DMA direction can be
set; hence whichever of the two steps comes second now performs the
DMA mapping.  (A userspace sketch of the triggering sequence follows
below.)
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
parent fe2af11c
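
For illustration (not part of the patch): a minimal userspace sketch of
the sequence that exposed the bug, modeled on a libdc1394-style capture
client.  The device node, channel, header size, and buffer size below
are placeholder values; only the order and flags of mmap() and the
FW_CDEV_IOC_CREATE_ISO_CONTEXT ioctl matter here.

/* Illustrative only: a libdc1394-style client maps the iso buffer
 * writable, then creates a reception context. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/firewire-cdev.h>

int main(void)
{
	struct fw_cdev_create_iso_context create = {
		.type        = FW_CDEV_ISO_CONTEXT_RECEIVE,
		.header_size = 4,		/* placeholder */
		.channel     = 0,		/* placeholder */
		.speed       = SCODE_400,
	};
	void *buf;
	int fd;

	fd = open("/dev/fw1", O_RDWR);		/* placeholder node */
	if (fd < 0)
		return 1;

	/* PROT_WRITE sets VM_WRITE on the VMA; the old code took that
	 * as "transmit buffer" and mapped the pages DMA_TO_DEVICE. */
	buf = mmap(NULL, 16 * 4096, PROT_READ | PROT_WRITE,
		   MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED)
		return 1;

	/* A reception context moves data device -> memory, so the
	 * pages must be mapped DMA_FROM_DEVICE despite PROT_WRITE. */
	if (ioctl(fd, FW_CDEV_IOC_CREATE_ISO_CONTEXT, &create) < 0) {
		perror("FW_CDEV_IOC_CREATE_ISO_CONTEXT");
		return 1;
	}
	return 0;
}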
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -22,6 +22,7 @@
 #include <linux/compat.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/dma-mapping.h>
 #include <linux/errno.h>
 #include <linux/firewire.h>
 #include <linux/firewire-cdev.h>
@@ -70,6 +71,7 @@ struct client {
 	u64 iso_closure;
 	struct fw_iso_buffer buffer;
 	unsigned long vm_start;
+	bool buffer_is_mapped;
 	struct list_head phy_receiver_link;
 	u64 phy_receiver_closure;
@@ -959,11 +961,20 @@ static void iso_mc_callback(struct fw_iso_context *context,
 			sizeof(e->interrupt), NULL, 0);
 }
 
+static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
+{
+	if (context->type == FW_ISO_CONTEXT_TRANSMIT)
+		return DMA_TO_DEVICE;
+	else
+		return DMA_FROM_DEVICE;
+}
+
 static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
 {
 	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
 	struct fw_iso_context *context;
 	fw_iso_callback_t cb;
+	int ret;
 
 	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
 		     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE  ||
@@ -1004,8 +1015,21 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
 	if (client->iso_context != NULL) {
 		spin_unlock_irq(&client->lock);
 		fw_iso_context_destroy(context);
+
 		return -EBUSY;
 	}
+	if (!client->buffer_is_mapped) {
+		ret = fw_iso_buffer_map_dma(&client->buffer,
+					    client->device->card,
+					    iso_dma_direction(context));
+		if (ret < 0) {
+			spin_unlock_irq(&client->lock);
+			fw_iso_context_destroy(context);
+
+			return ret;
+		}
+		client->buffer_is_mapped = true;
+	}
 	client->iso_closure = a->closure;
 	client->iso_context = context;
 	spin_unlock_irq(&client->lock);
@@ -1651,7 +1675,6 @@ static long fw_device_op_compat_ioctl(struct file *file,
 static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct client *client = file->private_data;
-	enum dma_data_direction direction;
 	unsigned long size;
 	int page_count, ret;
@@ -1674,20 +1697,28 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
 	if (size & ~PAGE_MASK)
 		return -EINVAL;
 
-	if (vma->vm_flags & VM_WRITE)
-		direction = DMA_TO_DEVICE;
-	else
-		direction = DMA_FROM_DEVICE;
-
-	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
-				 page_count, direction);
+	ret = fw_iso_buffer_alloc(&client->buffer, page_count);
 	if (ret < 0)
 		return ret;
 
-	ret = fw_iso_buffer_map(&client->buffer, vma);
+	spin_lock_irq(&client->lock);
+	if (client->iso_context) {
+		ret = fw_iso_buffer_map_dma(&client->buffer,
+					    client->device->card,
+					    iso_dma_direction(client->iso_context));
+		client->buffer_is_mapped = (ret == 0);
+	}
+	spin_unlock_irq(&client->lock);
 	if (ret < 0)
-		fw_iso_buffer_destroy(&client->buffer, client->device->card);
+		goto fail;
+
+	ret = fw_iso_buffer_map_vma(&client->buffer, vma);
+	if (ret < 0)
+		goto fail;
 
+	return 0;
+ fail:
+	fw_iso_buffer_destroy(&client->buffer, client->device->card);
 	return ret;
 }
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -39,52 +39,73 @@
  * Isochronous DMA context management
  */
 
-int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
-		       int page_count, enum dma_data_direction direction)
+int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count)
 {
-	int i, j;
-	dma_addr_t address;
-
-	buffer->page_count = page_count;
-	buffer->direction = direction;
+	int i;
 
+	buffer->page_count = 0;
+	buffer->page_count_mapped = 0;
 	buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
 				GFP_KERNEL);
 	if (buffer->pages == NULL)
-		goto out;
+		return -ENOMEM;
 
-	for (i = 0; i < buffer->page_count; i++) {
+	for (i = 0; i < page_count; i++) {
 		buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
 		if (buffer->pages[i] == NULL)
-			goto out_pages;
+			break;
+	}
+	buffer->page_count = i;
+	if (i < page_count) {
+		fw_iso_buffer_destroy(buffer, NULL);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
+			  enum dma_data_direction direction)
+{
+	dma_addr_t address;
+	int i;
+
+	buffer->direction = direction;
 
+	for (i = 0; i < buffer->page_count; i++) {
 		address = dma_map_page(card->device, buffer->pages[i],
 				       0, PAGE_SIZE, direction);
-		if (dma_mapping_error(card->device, address)) {
-			__free_page(buffer->pages[i]);
-			goto out_pages;
-		}
+		if (dma_mapping_error(card->device, address))
+			break;
+
 		set_page_private(buffer->pages[i], address);
 	}
+	buffer->page_count_mapped = i;
+	if (i < buffer->page_count)
+		return -ENOMEM;
 
 	return 0;
+}
 
- out_pages:
-	for (j = 0; j < i; j++) {
-		address = page_private(buffer->pages[j]);
-		dma_unmap_page(card->device, address,
-			       PAGE_SIZE, direction);
-		__free_page(buffer->pages[j]);
-	}
-	kfree(buffer->pages);
- out:
-	buffer->pages = NULL;
+int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+		       int page_count, enum dma_data_direction direction)
+{
+	int ret;
+
+	ret = fw_iso_buffer_alloc(buffer, page_count);
+	if (ret < 0)
+		return ret;
 
-	return -ENOMEM;
+	ret = fw_iso_buffer_map_dma(buffer, card, direction);
+	if (ret < 0)
+		fw_iso_buffer_destroy(buffer, card);
+
+	return ret;
 }
 EXPORT_SYMBOL(fw_iso_buffer_init);
 
-int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
+int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
+			  struct vm_area_struct *vma)
 {
 	unsigned long uaddr;
 	int i, err;
@@ -107,15 +128,18 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
 	int i;
 	dma_addr_t address;
 
-	for (i = 0; i < buffer->page_count; i++) {
+	for (i = 0; i < buffer->page_count_mapped; i++) {
 		address = page_private(buffer->pages[i]);
 		dma_unmap_page(card->device, address,
 			       PAGE_SIZE, buffer->direction);
-		__free_page(buffer->pages[i]);
 	}
+	for (i = 0; i < buffer->page_count; i++)
+		__free_page(buffer->pages[i]);
 
 	kfree(buffer->pages);
 	buffer->pages = NULL;
+	buffer->page_count = 0;
+	buffer->page_count_mapped = 0;
 }
 EXPORT_SYMBOL(fw_iso_buffer_destroy);
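
Side note on how the split composes (sketch below, not part of the
patch): fw_iso_buffer_alloc() needs neither card nor direction, so it
can run at mmap time; fw_iso_buffer_map_dma() runs later, once the
context type fixes the direction; and because page_count_mapped bounds
the dma_unmap_page() loop, fw_iso_buffer_destroy() copes with partially
mapped or entirely unmapped buffers (which is why fw_iso_buffer_alloc()
can safely pass card == NULL on its error path).  The hypothetical
example_setup_buffer() below, written as if it lived inside
firewire-core where core.h declares these helpers, merely restates how
the reworked fw_iso_buffer_init() composes the two steps:

/* Hypothetical in-core caller, for illustration only. */
static int example_setup_buffer(struct fw_iso_buffer *buffer,
				struct fw_card *card,
				struct fw_iso_context *ctx,
				int page_count)
{
	int ret;

	/* Step 1: allocate pages; no card and no direction needed yet. */
	ret = fw_iso_buffer_alloc(buffer, page_count);
	if (ret < 0)
		return ret;

	/* Step 2: map for DMA once the context type is known. */
	ret = fw_iso_buffer_map_dma(buffer, card,
				    ctx->type == FW_ISO_CONTEXT_TRANSMIT ?
				    DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (ret < 0)
		/* Unmaps page_count_mapped pages, frees page_count pages. */
		fw_iso_buffer_destroy(buffer, card);

	return ret;
}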
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -3,6 +3,7 @@
 
 #include <linux/compiler.h>
 #include <linux/device.h>
+#include <linux/dma-mapping.h>
 #include <linux/fs.h>
 #include <linux/list.h>
 #include <linux/idr.h>
@@ -169,7 +170,11 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
 
 /* -iso */
 
-int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
+int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
+int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
+			  enum dma_data_direction direction);
+int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
+			  struct vm_area_struct *vma);
 
 /* -topology */
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -391,6 +391,7 @@ struct fw_iso_buffer {
 	enum dma_data_direction direction;
 	struct page **pages;
 	int page_count;
+	int page_count_mapped;
 };
 
 int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,