Commit 66728eee authored by Sebastian Ott, committed by Martin Schwidefsky

s390/pci_dma: handle dma table failures

We use lazy allocation for translation table entries but don't handle
allocation (and other) failures during translation table updates.

Handle these failures and undo translation table updates where it's
meaningful (a standalone sketch of this pattern follows the commit metadata).
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 4d5a6b72
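
The fix follows a common update-then-unwind pattern: update translation
table entries one at a time and, on any failure, walk back over the entries
that were already updated and invalidate them again. Below is a minimal
user-space sketch of that pattern; table_walk() and table_update() are
illustrative stand-ins for dma_walk_cpu_trans() and dma_update_cpu_trans(),
not the kernel API.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

#define TABLE_SIZE 8

/*
 * Stand-in for dma_walk_cpu_trans(): returns the entry for an index,
 * or NULL when the lazily allocated table level could not be created.
 */
static unsigned long *table_walk(unsigned long *table, size_t idx)
{
	return idx < TABLE_SIZE ? &table[idx] : NULL;
}

/* Stand-in for dma_update_cpu_trans(): plain entry update, cannot fail. */
static void table_update(unsigned long *entry, unsigned long val)
{
	*entry = val;
}

/*
 * Update nr entries starting at start; on a walk failure, invalidate the
 * entries already updated, mirroring the undo_cpu_trans loops this patch adds.
 */
static int update_range(unsigned long *table, size_t start, size_t nr)
{
	unsigned long *entry;
	size_t i;

	for (i = 0; i < nr; i++) {
		entry = table_walk(table, start + i);
		if (!entry)
			goto undo;	/* lazy allocation failed */
		table_update(entry, 1);
	}
	return 0;
undo:
	while (i-- > 0) {	/* walk back over updated entries */
		entry = table_walk(table, start + i);
		if (!entry)
			break;	/* defensive; these entries existed */
		table_update(entry, 0);
	}
	return -ENOMEM;
}

int main(void)
{
	unsigned long table[TABLE_SIZE] = { 0 };

	/* Crossing index 7 fails and unwinds entries 6 and 7 again. */
	printf("rc=%d entry6=%lu\n", update_range(table, 6, 4), table[6]);
	return 0;
}

During the unwind the walk is re-run only for entries that were just
updated, so their table levels already exist and the walk is not expected
to fail there; the if (!entry) break; in the undo loops below is purely
defensive.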
--- a/arch/s390/include/asm/pci_dma.h
+++ b/arch/s390/include/asm/pci_dma.h
@@ -195,5 +195,7 @@ void zpci_dma_exit_device(struct zpci_dev *);
 void dma_free_seg_table(unsigned long);
 unsigned long *dma_alloc_cpu_table(void);
 void dma_cleanup_tables(unsigned long *);
-void dma_update_cpu_trans(unsigned long *, void *, dma_addr_t, int);
+unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr);
+void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags);
+
 #endif
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -95,7 +95,7 @@ static unsigned long *dma_get_page_table_origin(unsigned long *entry)
 	return pto;
 }
 
-static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
+unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
 {
 	unsigned long *sto, *pto;
 	unsigned int rtx, sx, px;
@@ -114,17 +114,8 @@ static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
 	return &pto[px];
 }
 
-void dma_update_cpu_trans(unsigned long *dma_table, void *page_addr,
-			  dma_addr_t dma_addr, int flags)
+void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
 {
-	unsigned long *entry;
-
-	entry = dma_walk_cpu_trans(dma_table, dma_addr);
-	if (!entry) {
-		WARN_ON_ONCE(1);
-		return;
-	}
-
 	if (flags & ZPCI_PTE_INVALID) {
 		invalidate_pt_entry(entry);
 	} else {
@@ -145,18 +136,25 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
 	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
 	dma_addr_t start_dma_addr = dma_addr;
 	unsigned long irq_flags;
+	unsigned long *entry;
 	int i, rc = 0;
 
 	if (!nr_pages)
 		return -EINVAL;
 
 	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
-	if (!zdev->dma_table)
+	if (!zdev->dma_table) {
+		rc = -EINVAL;
 		goto no_refresh;
+	}
 
 	for (i = 0; i < nr_pages; i++) {
-		dma_update_cpu_trans(zdev->dma_table, page_addr, dma_addr,
-				     flags);
+		entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+		if (!entry) {
+			rc = -ENOMEM;
+			goto undo_cpu_trans;
+		}
+		dma_update_cpu_trans(entry, page_addr, flags);
 		page_addr += PAGE_SIZE;
 		dma_addr += PAGE_SIZE;
 	}
@@ -175,6 +173,18 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
 	rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
 				nr_pages * PAGE_SIZE);
 
+undo_cpu_trans:
+	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
+		flags = ZPCI_PTE_INVALID;
+		while (i-- > 0) {
+			page_addr -= PAGE_SIZE;
+			dma_addr -= PAGE_SIZE;
+			entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+			if (!entry)
+				break;
+			dma_update_cpu_trans(entry, page_addr, flags);
+		}
+	}
 no_refresh:
 	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
...
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -216,6 +216,7 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
 	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
 	dma_addr_t start_dma_addr = dma_addr;
 	unsigned long irq_flags, nr_pages, i;
+	unsigned long *entry;
 	int rc = 0;
 
 	if (dma_addr < s390_domain->domain.geometry.aperture_start ||
@@ -228,8 +229,12 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
 
 	spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags);
 	for (i = 0; i < nr_pages; i++) {
-		dma_update_cpu_trans(s390_domain->dma_table, page_addr,
-				     dma_addr, flags);
+		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
+		if (!entry) {
+			rc = -ENOMEM;
+			goto undo_cpu_trans;
+		}
+		dma_update_cpu_trans(entry, page_addr, flags);
 		page_addr += PAGE_SIZE;
 		dma_addr += PAGE_SIZE;
 	}
@@ -242,6 +247,20 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
 			break;
 	}
 	spin_unlock(&s390_domain->list_lock);
+
+undo_cpu_trans:
+	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
+		flags = ZPCI_PTE_INVALID;
+		while (i-- > 0) {
+			page_addr -= PAGE_SIZE;
+			dma_addr -= PAGE_SIZE;
+			entry = dma_walk_cpu_trans(s390_domain->dma_table,
+						   dma_addr);
+			if (!entry)
+				break;
+			dma_update_cpu_trans(entry, page_addr, flags);
+		}
+	}
 	spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags);
 
 	return rc;
...