Commit 5659c0e4 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
 "A number of ARM fixes, the biggest is fixing a regression caused by
  appended DT blobs exceeding 64K, causing the decompressor fixup code
  to fail to patch the DT blob.  Another important fix is for the ASID
  allocator from Will Deacon which prevents some rare crashes seen on
  some systems.  Lastly, there's a build fix for v7M systems when printk
  support is disabled.

  The last two remaining fixes are more cosmetic - the IOMMU one
  prevents an annoying harmless warning message, and we disable the
  kernel strict memory permissions on non-MMU which can't support it
  anyway"

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: 8299/1: mm: ensure local active ASID is marked as allocated on rollover
  ARM: 8298/1: ARM_KERNMEM_PERMS only works with MMU enabled
  ARM: 8295/1: fix v7M build for !CONFIG_PRINTK
  ARM: 8294/1: ATAG_DTB_COMPAT: remove the DT workspace's hardcoded 64KB size
  ARM: 8288/1: dma-mapping: don't detach devices without an IOMMU during teardown
parents dc6d6844 8e648066
...@@ -263,16 +263,37 @@ restart: adr r0, LC0 ...@@ -263,16 +263,37 @@ restart: adr r0, LC0
* OK... Let's do some funky business here. * OK... Let's do some funky business here.
* If we do have a DTB appended to zImage, and we do have * If we do have a DTB appended to zImage, and we do have
* an ATAG list around, we want the latter to be translated * an ATAG list around, we want the latter to be translated
* and folded into the former here. To be on the safe side, * and folded into the former here. No GOT fixup has occurred
* let's temporarily move the stack away into the malloc * yet, but none of the code we're about to call uses any
* area. No GOT fixup has occurred yet, but none of the * global variable.
* code we're about to call uses any global variable.
*/ */
add sp, sp, #0x10000
/* Get the initial DTB size */
ldr r5, [r6, #4]
#ifndef __ARMEB__
/* convert to little endian */
eor r1, r5, r5, ror #16
bic r1, r1, #0x00ff0000
mov r5, r5, ror #8
eor r5, r5, r1, lsr #8
#endif
/* 50% DTB growth should be good enough */
add r5, r5, r5, lsr #1
/* preserve 64-bit alignment */
add r5, r5, #7
bic r5, r5, #7
/* clamp to 32KB min and 1MB max */
cmp r5, #(1 << 15)
movlo r5, #(1 << 15)
cmp r5, #(1 << 20)
movhi r5, #(1 << 20)
/* temporarily relocate the stack past the DTB work space */
add sp, sp, r5
stmfd sp!, {r0-r3, ip, lr} stmfd sp!, {r0-r3, ip, lr}
mov r0, r8 mov r0, r8
mov r1, r6 mov r1, r6
sub r2, sp, r6 mov r2, r5
bl atags_to_fdt bl atags_to_fdt
/* /*
...@@ -285,11 +306,11 @@ restart: adr r0, LC0 ...@@ -285,11 +306,11 @@ restart: adr r0, LC0
bic r0, r0, #1 bic r0, r0, #1
add r0, r0, #0x100 add r0, r0, #0x100
mov r1, r6 mov r1, r6
sub r2, sp, r6 mov r2, r5
bleq atags_to_fdt bleq atags_to_fdt
ldmfd sp!, {r0-r3, ip, lr} ldmfd sp!, {r0-r3, ip, lr}
sub sp, sp, #0x10000 sub sp, sp, r5
#endif #endif
mov r8, r6 @ use the appended device tree mov r8, r6 @ use the appended device tree
...@@ -306,7 +327,7 @@ restart: adr r0, LC0 ...@@ -306,7 +327,7 @@ restart: adr r0, LC0
subs r1, r5, r1 subs r1, r5, r1
addhi r9, r9, r1 addhi r9, r9, r1
/* Get the dtb's size */ /* Get the current DTB size */
ldr r5, [r6, #4] ldr r5, [r6, #4]
#ifndef __ARMEB__ #ifndef __ARMEB__
/* convert r5 (dtb size) to little endian */ /* convert r5 (dtb size) to little endian */
......
...@@ -22,10 +22,12 @@ ...@@ -22,10 +22,12 @@
__invalid_entry: __invalid_entry:
v7m_exception_entry v7m_exception_entry
#ifdef CONFIG_PRINTK
adr r0, strerr adr r0, strerr
mrs r1, ipsr mrs r1, ipsr
mov r2, lr mov r2, lr
bl printk bl printk
#endif
mov r0, sp mov r0, sp
bl show_regs bl show_regs
1: b 1b 1: b 1b
......
...@@ -1012,6 +1012,7 @@ config ARCH_SUPPORTS_BIG_ENDIAN ...@@ -1012,6 +1012,7 @@ config ARCH_SUPPORTS_BIG_ENDIAN
config ARM_KERNMEM_PERMS config ARM_KERNMEM_PERMS
bool "Restrict kernel memory permissions" bool "Restrict kernel memory permissions"
depends on MMU
help help
If this is set, kernel memory other than kernel text (and rodata) If this is set, kernel memory other than kernel text (and rodata)
will be made non-executable. The tradeoff is that each region is will be made non-executable. The tradeoff is that each region is
......
...@@ -144,21 +144,17 @@ static void flush_context(unsigned int cpu) ...@@ -144,21 +144,17 @@ static void flush_context(unsigned int cpu)
/* Update the list of reserved ASIDs and the ASID bitmap. */ /* Update the list of reserved ASIDs and the ASID bitmap. */
bitmap_clear(asid_map, 0, NUM_USER_ASIDS); bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
if (i == cpu) { asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
asid = 0; /*
} else { * If this CPU has already been through a
asid = atomic64_xchg(&per_cpu(active_asids, i), 0); * rollover, but hasn't run another task in
/* * the meantime, we must preserve its reserved
* If this CPU has already been through a * ASID, as this is the only trace we have of
* rollover, but hasn't run another task in * the process it is still running.
* the meantime, we must preserve its reserved */
* ASID, as this is the only trace we have of if (asid == 0)
* the process it is still running. asid = per_cpu(reserved_asids, i);
*/ __set_bit(asid & ~ASID_MASK, asid_map);
if (asid == 0)
asid = per_cpu(reserved_asids, i);
__set_bit(asid & ~ASID_MASK, asid_map);
}
per_cpu(reserved_asids, i) = asid; per_cpu(reserved_asids, i) = asid;
} }
......
...@@ -2048,6 +2048,9 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) ...@@ -2048,6 +2048,9 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
{ {
struct dma_iommu_mapping *mapping = dev->archdata.mapping; struct dma_iommu_mapping *mapping = dev->archdata.mapping;
if (!mapping)
return;
__arm_iommu_detach_device(dev); __arm_iommu_detach_device(dev);
arm_iommu_release_mapping(mapping); arm_iommu_release_mapping(mapping);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment