Commit 52d43d81 authored by Sebastian Ott, committed by Martin Schwidefsky

s390/pci_dma: improve debugging of errors during dma map

Improve debugging to find out what went wrong during a failed
dma map/unmap operation.
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 66728eee
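The record that the new zpci_err_dma() helper dumps is a packed pair of the return code and the address involved. As a layout aid only (this sketch is not part of the patch), the following user-space C program mirrors the struct added below; on s390x an unsigned long is 8 bytes, so each record occupies 16 bytes with no padding:

	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	/* User-space mirror of the record zpci_err_dma() dumps
	 * (illustration only; unsigned long is 8 bytes on s390x). */
	struct err_record {
		unsigned long rc;	/* negative errno from the map/unmap path */
		unsigned long addr;	/* physical or DMA address involved */
	} __attribute__((packed));

	int main(void)
	{
		static_assert(sizeof(struct err_record) ==
			      2 * sizeof(unsigned long), "no padding");
		printf("rc @ %zu, addr @ %zu, record size %zu bytes\n",
		       offsetof(struct err_record, rc),
		       offsetof(struct err_record, addr),
		       sizeof(struct err_record));
		return 0;
	}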
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -269,6 +269,16 @@ static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size
 	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
 }
 
+static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
+{
+	struct {
+		unsigned long rc;
+		unsigned long addr;
+	} __packed data = {rc, addr};
+
+	zpci_err_hex(&data, sizeof(data));
+}
+
 static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 				     unsigned long offset, size_t size,
 				     enum dma_data_direction direction,
@@ -279,33 +289,40 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 	unsigned long pa = page_to_phys(page) + offset;
 	int flags = ZPCI_PTE_VALID;
 	dma_addr_t dma_addr;
+	int ret;
 
 	/* This rounds up number of pages based on size and offset */
 	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
 	iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
-	if (iommu_page_index == -1)
+	if (iommu_page_index == -1) {
+		ret = -ENOSPC;
 		goto out_err;
+	}
 
 	/* Use rounded up size */
 	size = nr_pages * PAGE_SIZE;
 
 	dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
-	if (dma_addr + size > zdev->end_dma)
+	if (dma_addr + size > zdev->end_dma) {
+		ret = -ERANGE;
 		goto out_free;
+	}
 
 	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
 		flags |= ZPCI_TABLE_PROTECTED;
 
-	if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
-		atomic64_add(nr_pages, &zdev->mapped_pages);
-		return dma_addr + (offset & ~PAGE_MASK);
-	}
+	ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
+	if (ret)
+		goto out_free;
+
+	atomic64_add(nr_pages, &zdev->mapped_pages);
+	return dma_addr + (offset & ~PAGE_MASK);
 
 out_free:
 	dma_free_iommu(zdev, iommu_page_index, nr_pages);
 out_err:
 	zpci_err("map error:\n");
-	zpci_err_hex(&pa, sizeof(pa));
+	zpci_err_dma(ret, pa);
 	return DMA_ERROR_CODE;
 }
 
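For context (not part of the patch): callers of the DMA API never see `ret`. A failed map still surfaces only as DMA_ERROR_CODE, which a driver must test with dma_mapping_error(); the rc/addr record logged above is what later tells a developer why the map failed. A hedged sketch of that caller-side pattern, with an invented function name:

	#include <linux/dma-mapping.h>

	/* Hypothetical driver code (example_map_one is made up for
	 * illustration; dma_map_page/dma_mapping_error are the
	 * standard DMA API entry points). */
	static int example_map_one(struct device *dev, struct page *page,
				   size_t offset, size_t size)
	{
		dma_addr_t handle = dma_map_page(dev, page, offset, size,
						 DMA_TO_DEVICE);

		if (dma_mapping_error(dev, handle))
			return -EIO;	/* details are in the s390 debug log */

		/* ... program the device with 'handle' ... */
		dma_unmap_page(dev, handle, size, DMA_TO_DEVICE);
		return 0;
	}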
@@ -315,14 +332,16 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
 {
 	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 	unsigned long iommu_page_index;
-	int npages;
+	int npages, ret;
 
 	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
 	dma_addr = dma_addr & PAGE_MASK;
-	if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
-			     ZPCI_PTE_INVALID)) {
+	ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
+			       ZPCI_PTE_INVALID);
+	if (ret) {
 		zpci_err("unmap error:\n");
-		zpci_err_hex(&dma_addr, sizeof(dma_addr));
+		zpci_err_dma(ret, dma_addr);
+		return;
 	}
 	atomic64_add(npages, &zdev->unmapped_pages);
...
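The dumped records land in the s390 debug feature, typically readable under /sys/kernel/debug/s390dbf/pci_error/ when debugfs is mounted (the path is an assumption based on the usual s390dbf layout). A minimal user-space sketch for decoding one 16-byte record, assuming the big-endian byte order of s390x and the {rc, addr} layout introduced above:

	#include <stdint.h>
	#include <stdio.h>

	/* Decode one 16-byte zpci_err_dma() record: 8-byte rc followed
	 * by 8-byte addr, big-endian as on s390x. rc is a negative
	 * errno: -ENOSPC (iommu bitmap exhausted), -ERANGE (mapping
	 * beyond end_dma), or whatever dma_update_trans() returned. */
	static void decode_record(const unsigned char buf[16])
	{
		uint64_t rc = 0, addr = 0;
		int i;

		for (i = 0; i < 8; i++) {
			rc = (rc << 8) | buf[i];
			addr = (addr << 8) | buf[i + 8];
		}
		printf("rc=%lld addr=0x%016llx\n",
		       (long long)rc, (unsigned long long)addr);
	}

	int main(void)
	{
		/* Example record: rc = -ENOSPC (-28), addr = 0x1000. */
		const unsigned char rec[16] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,
		};
		decode_record(rec);
		return 0;
	}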