Commit f7fcec93 authored by Paul Mundt

sh: Fix up PMB locking.

This first converts the PMB locking over to raw spinlocks, and secondly
fixes up a nested locking issue that was triggering lockdep early on:

 swapper/0 is trying to acquire lock:
  (&pmbe->lock){......}, at: [<806be9bc>] pmb_init+0xf4/0x4dc

 but task is already holding lock:
  (&pmbe->lock){......}, at: [<806be98e>] pmb_init+0xc6/0x4dc

 other info that might help us debug this:
 1 lock held by swapper/0:
  #0:  (&pmbe->lock){......}, at: [<806be98e>] pmb_init+0xc6/0x4dc
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 47da88f3
@@ -40,7 +40,7 @@ struct pmb_entry {
 	unsigned long flags;
 	unsigned long size;

-	spinlock_t lock;
+	raw_spinlock_t lock;

 	/*
 	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
@@ -265,7 +265,7 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 	memset(pmbe, 0, sizeof(struct pmb_entry));

-	spin_lock_init(&pmbe->lock);
+	raw_spin_lock_init(&pmbe->lock);

 	pmbe->vpn	= vpn;
 	pmbe->ppn	= ppn;
@@ -327,9 +327,9 @@ static void set_pmb_entry(struct pmb_entry *pmbe)
 {
 	unsigned long flags;

-	spin_lock_irqsave(&pmbe->lock, flags);
+	raw_spin_lock_irqsave(&pmbe->lock, flags);
 	__set_pmb_entry(pmbe);
-	spin_unlock_irqrestore(&pmbe->lock, flags);
+	raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 }
 #endif /* CONFIG_PM */
@@ -368,7 +368,7 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 			return PTR_ERR(pmbe);
 		}

-		spin_lock_irqsave(&pmbe->lock, flags);
+		raw_spin_lock_irqsave(&pmbe->lock, flags);

 		pmbe->size = pmb_sizes[i].size;
@@ -383,9 +383,10 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 		 * entries for easier tear-down.
 		 */
 		if (likely(pmbp)) {
-			spin_lock(&pmbp->lock);
+			raw_spin_lock_nested(&pmbp->lock,
+					     SINGLE_DEPTH_NESTING);
 			pmbp->link = pmbe;
-			spin_unlock(&pmbp->lock);
+			raw_spin_unlock(&pmbp->lock);
 		}

 		pmbp = pmbe;
@@ -398,7 +399,7 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 		i--;
 		mapped++;

-		spin_unlock_irqrestore(&pmbe->lock, flags);
+		raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 		}
 	} while (size >= SZ_16M);
@@ -627,15 +628,14 @@ static void __init pmb_synchronize(void)
 			continue;
 		}

-		spin_lock_irqsave(&pmbe->lock, irqflags);
+		raw_spin_lock_irqsave(&pmbe->lock, irqflags);

 		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
 			if (pmb_sizes[j].flag == size)
 				pmbe->size = pmb_sizes[j].size;

 		if (pmbp) {
-			spin_lock(&pmbp->lock);
+			raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);

 			/*
 			 * Compare the previous entry against the current one to
 			 * see if the entries span a contiguous mapping. If so,
@@ -644,13 +644,12 @@ static void __init pmb_synchronize(void)
 			 */
 			if (pmb_can_merge(pmbp, pmbe))
 				pmbp->link = pmbe;
-
-			spin_unlock(&pmbp->lock);
+			raw_spin_unlock(&pmbp->lock);
 		}

 		pmbp = pmbe;

-		spin_unlock_irqrestore(&pmbe->lock, irqflags);
+		raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
 	}
 }
@@ -757,7 +756,7 @@ static void __init pmb_resize(void)
 		/*
 		 * Found it, now resize it.
 		 */
-		spin_lock_irqsave(&pmbe->lock, flags);
+		raw_spin_lock_irqsave(&pmbe->lock, flags);

 		pmbe->size = SZ_16M;
 		pmbe->flags &= ~PMB_SZ_MASK;
@@ -767,7 +766,7 @@ static void __init pmb_resize(void)
 		__set_pmb_entry(pmbe);

-		spin_unlock_irqrestore(&pmbe->lock, flags);
+		raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 	}

 	read_unlock(&pmb_rwlock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment