Commit c78d0c74 authored by Heiko Carstens

s390: rename dma section to amode31

The dma section name is confusing, since the code residing within that
section has nothing to do with direct memory access. Instead, the
limitation is that the code has to run in 31-bit addressing mode, and
therefore has to reside below 2GB. The name was chosen only because
ZONE_DMA covers the same region.

To reduce confusion, rename the section to amode31, which hopefully
describes better what this is about.

Note: this will also change vmcoreinfo strings
- SDMA=... gets renamed to SAMODE31=...
- EDMA=... gets renamed to EAMODE31=...
Acked-by: Vasily Gorbik <gor@linux.ibm.com>
Reviewed-by: Alexander Egorenkov <egorenar@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
parent cfafad6d
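Before the diff itself, it helps to see how the renamed markers are meant to be used. The following is a minimal sketch, not code from the patch; it assumes the __amode31_data and __amode31_ref helpers defined in entry.h below, and mirrors the __diag210_tmp_amode31 pattern from diag.c:

#include <asm/diag.h>	/* struct diag210; assumed to be on the include path */

/* An object that must stay below 2GB goes into .amode31.data ... */
static __amode31_data struct diag210 diag210_buf;

/* ... and is reached through a pointer kept in .amode31.refs, so the
 * boot-time relocation can patch the pointer when the section moves. */
static struct diag210 __amode31_ref *diag210_ptr = &diag210_buf;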
@@ -21,7 +21,7 @@
 static void diag0c_fn(void *data)
 {
 	diag_stat_inc(DIAG_STAT_X00C);
-	diag_dma_ops.diag0c(((void **) data)[smp_processor_id()]);
+	diag_amode31_ops.diag0c(((void **)data)[smp_processor_id()]);
 }
 
 /*
@@ -309,7 +309,10 @@ int diag26c(void *req, void *resp, enum diag26c_sc subcode);
 
 struct hypfs_diag0c_entry;
 
-/* This struct must contain only pointers/references into the text DMA section. */
+/*
+ * This structure must contain only pointers/references into
+ * the AMODE31 text section.
+ */
 struct diag_ops {
 	int (*diag210)(struct diag210 *addr);
 	int (*diag26c)(void *req, void *resp, enum diag26c_sc subcode);
@@ -318,13 +321,13 @@ struct diag_ops {
 	void (*diag308_reset)(void);
 };
 
-extern struct diag_ops diag_dma_ops;
-extern struct diag210 *__diag210_tmp_dma;
+extern struct diag_ops diag_amode31_ops;
+extern struct diag210 *__diag210_tmp_amode31;
 
-int _diag210_dma(struct diag210 *addr);
-int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode);
-int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode);
-void _diag0c_dma(struct hypfs_diag0c_entry *entry);
-void _diag308_reset_dma(void);
+int _diag210_amode31(struct diag210 *addr);
+int _diag26c_amode31(void *req, void *resp, enum diag26c_sc subcode);
+int _diag14_amode31(unsigned long rx, unsigned long ry1, unsigned long subcode);
+void _diag0c_amode31(struct hypfs_diag0c_entry *entry);
+void _diag308_reset_amode31(void);
 
 #endif /* _ASM_S390_DIAG_H */
@@ -28,8 +28,8 @@ struct exception_table_entry
 	long handler;
 };
 
-extern struct exception_table_entry *__start_dma_ex_table;
-extern struct exception_table_entry *__stop_dma_ex_table;
+extern struct exception_table_entry *__start_amode31_ex_table;
+extern struct exception_table_entry *__stop_amode31_ex_table;
 
 const struct exception_table_entry *s390_search_extables(unsigned long addr);
@@ -22,7 +22,7 @@
 #define EX_TABLE(_fault, _target)	\
 	__EX_TABLE(__ex_table, _fault, _target)
 
-#define EX_TABLE_DMA(_fault, _target)	\
-	__EX_TABLE(.dma.ex_table, _fault, _target)
+#define EX_TABLE_AMODE31(_fault, _target)	\
+	__EX_TABLE(.amode31.ex_table, _fault, _target)
 
 #endif
@@ -35,7 +35,7 @@ static inline int arch_is_kernel_initmem_freed(unsigned long addr)
  */
 #define __bootdata_preserved(var) __section(".boot.preserved.data." #var) var
 
-extern unsigned long __sdma, __edma;
-extern unsigned long __stext_dma, __etext_dma;
+extern unsigned long __samode31, __eamode31;
+extern unsigned long __stext_amode31, __etext_amode31;
 
 #endif
@@ -40,7 +40,7 @@ obj-y	+= sysinfo.o lgr.o os_info.o machine_kexec.o
 obj-y	+= runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
 obj-y	+= entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
 obj-y	+= nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
-obj-y	+= smp.o text_dma.o
+obj-y	+= smp.o text_amode31.o
 
 extra-y	+= head64.o vmlinux.lds
@@ -51,16 +51,16 @@ static const struct diag_desc diag_map[NR_DIAG_STAT] = {
 	[DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" },
 };
 
-struct diag_ops __dma_ref diag_dma_ops = {
-	.diag210 = _diag210_dma,
-	.diag26c = _diag26c_dma,
-	.diag14 = _diag14_dma,
-	.diag0c = _diag0c_dma,
-	.diag308_reset = _diag308_reset_dma
+struct diag_ops __amode31_ref diag_amode31_ops = {
+	.diag210 = _diag210_amode31,
+	.diag26c = _diag26c_amode31,
+	.diag14 = _diag14_amode31,
+	.diag0c = _diag0c_amode31,
+	.diag308_reset = _diag308_reset_amode31
 };
 
-static struct diag210 _diag210_tmp_dma __section(".dma.data");
-struct diag210 __dma_ref *__diag210_tmp_dma = &_diag210_tmp_dma;
+static struct diag210 _diag210_tmp_amode31 __section(".amode31.data");
+struct diag210 __amode31_ref *__diag210_tmp_amode31 = &_diag210_tmp_amode31;
 
 static int show_diag_stat(struct seq_file *m, void *v)
 {
@@ -144,7 +144,7 @@ EXPORT_SYMBOL(diag_stat_inc_norecursion);
 int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
 {
 	diag_stat_inc(DIAG_STAT_X014);
-	return diag_dma_ops.diag14(rx, ry1, subcode);
+	return diag_amode31_ops.diag14(rx, ry1, subcode);
 }
 EXPORT_SYMBOL(diag14);
@@ -181,12 +181,12 @@ int diag210(struct diag210 *addr)
 	int ccode;
 
 	spin_lock_irqsave(&diag210_lock, flags);
-	*__diag210_tmp_dma = *addr;
+	*__diag210_tmp_amode31 = *addr;
 
 	diag_stat_inc(DIAG_STAT_X210);
-	ccode = diag_dma_ops.diag210(__diag210_tmp_dma);
+	ccode = diag_amode31_ops.diag210(__diag210_tmp_amode31);
 
-	*addr = *__diag210_tmp_dma;
+	*addr = *__diag210_tmp_amode31;
 	spin_unlock_irqrestore(&diag210_lock, flags);
 
 	return ccode;
@@ -214,6 +214,6 @@ EXPORT_SYMBOL(diag224);
 int diag26c(void *req, void *resp, enum diag26c_sc subcode)
 {
 	diag_stat_inc(DIAG_STAT_X26C);
-	return diag_dma_ops.diag26c(req, resp, subcode);
+	return diag_amode31_ops.diag26c(req, resp, subcode);
 }
 EXPORT_SYMBOL(diag26c);
@@ -64,13 +64,13 @@ void stack_free(unsigned long stack);
 
 extern char kprobes_insn_page[];
 
-extern char _sdma[], _edma[];
-extern char _stext_dma[], _etext_dma[];
-extern struct exception_table_entry _start_dma_ex_table[];
-extern struct exception_table_entry _stop_dma_ex_table[];
+extern char _samode31[], _eamode31[];
+extern char _stext_amode31[], _etext_amode31[];
+extern struct exception_table_entry _start_amode31_ex_table[];
+extern struct exception_table_entry _stop_amode31_ex_table[];
 
-#define __dma_data __section(".dma.data")
-#define __dma_ref __section(".dma.refs")
-extern long _start_dma_refs[], _end_dma_refs[];
+#define __amode31_data __section(".amode31.data")
+#define __amode31_ref __section(".amode31.refs")
+extern long _start_amode31_refs[], _end_amode31_refs[];
 
 #endif /* _ENTRY_H */
@@ -2082,7 +2082,7 @@ void s390_reset_system(void)
 	/* Disable lowcore protection */
 	__ctl_clear_bit(0, 28);
-	diag_dma_ops.diag308_reset();
+	diag_amode31_ops.diag308_reset();
 }
 
 #ifdef CONFIG_KEXEC_FILE
@@ -224,8 +224,8 @@ void arch_crash_save_vmcoreinfo(void)
 	VMCOREINFO_SYMBOL(lowcore_ptr);
 	VMCOREINFO_SYMBOL(high_memory);
 	VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
-	vmcoreinfo_append_str("SDMA=%lx\n", __sdma);
-	vmcoreinfo_append_str("EDMA=%lx\n", __edma);
+	vmcoreinfo_append_str("SAMODE31=%lx\n", __samode31);
+	vmcoreinfo_append_str("EAMODE31=%lx\n", __eamode31);
 	vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
 	mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
 }
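Any dump tool that consumes the vmcoreinfo note (crash, makedumpfile and friends) has to follow this rename. A user-space sketch of the parsing side (the function name and the fallback strategy are illustrative, not part of the patch):

#include <stdio.h>
#include <string.h>

/* Sketch: pull the AMODE31 section bounds out of a vmcoreinfo text blob. */
static int parse_amode31_bounds(const char *vmcoreinfo,
				unsigned long *start, unsigned long *end)
{
	const char *s = strstr(vmcoreinfo, "SAMODE31=");
	const char *e = strstr(vmcoreinfo, "EAMODE31=");

	if (!s || !e)	/* pre-rename kernel: look for SDMA=/EDMA= instead */
		return -1;
	if (sscanf(s, "SAMODE31=%lx", start) != 1 ||
	    sscanf(e, "EAMODE31=%lx", end) != 1)
		return -1;
	return 0;
}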
@@ -92,36 +92,36 @@ EXPORT_SYMBOL(console_irq);
 /*
  * Some code and data needs to stay below 2 GB, even when the kernel would be
  * relocated above 2 GB, because it has to use 31 bit addresses.
- * Such code and data is part of the .dma section.
+ * Such code and data is part of the .amode31 section.
  */
-unsigned long __dma_ref __sdma = __pa(&_sdma);
-unsigned long __dma_ref __edma = __pa(&_edma);
-unsigned long __dma_ref __stext_dma = __pa(&_stext_dma);
-unsigned long __dma_ref __etext_dma = __pa(&_etext_dma);
-struct exception_table_entry __dma_ref *__start_dma_ex_table = _start_dma_ex_table;
-struct exception_table_entry __dma_ref *__stop_dma_ex_table = _stop_dma_ex_table;
+unsigned long __amode31_ref __samode31 = __pa(&_samode31);
+unsigned long __amode31_ref __eamode31 = __pa(&_eamode31);
+unsigned long __amode31_ref __stext_amode31 = __pa(&_stext_amode31);
+unsigned long __amode31_ref __etext_amode31 = __pa(&_etext_amode31);
+struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
+struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;
 
 /*
  * Control registers CR2, CR5 and CR15 are initialized with addresses
- * of tables that must be placed below 2G which is handled by the DMA
+ * of tables that must be placed below 2G which is handled by the AMODE31
  * sections.
- * Because the DMA sections are relocated below 2G at startup,
+ * Because the AMODE31 sections are relocated below 2G at startup,
  * the content of control registers CR2, CR5 and CR15 must be updated
  * with new addresses after the relocation. The initial initialization of
- * control registers occurs in head64.S and then gets updated again after DMA
- * relocation. We must access the relevant DMA tables indirectly via
- * pointers placed in the .dma.refs linker section. Those pointers get
- * updated automatically during DMA relocation and always contain a valid
- * address within DMA sections.
+ * control registers occurs in head64.S and then gets updated again after AMODE31
+ * relocation. We must access the relevant AMODE31 tables indirectly via
+ * pointers placed in the .amode31.refs linker section. Those pointers get
+ * updated automatically during AMODE31 relocation and always contain a valid
+ * address within AMODE31 sections.
  */
-static __dma_data u32 __ctl_duct_dma[16] __aligned(64);
+static __amode31_data u32 __ctl_duct_amode31[16] __aligned(64);
 
-static __dma_data u64 __ctl_aste_dma[8] __aligned(64) = {
+static __amode31_data u64 __ctl_aste_amode31[8] __aligned(64) = {
 	[1] = 0xffffffffffffffff
 };
 
-static __dma_data u32 __ctl_duald_dma[32] __aligned(128) = {
+static __amode31_data u32 __ctl_duald_amode31[32] __aligned(128) = {
 	0x80000000, 0, 0, 0,
 	0x80000000, 0, 0, 0,
 	0x80000000, 0, 0, 0,
@@ -132,15 +132,15 @@ static __dma_data u32 __ctl_duald_dma[32] __aligned(128) = {
 	0x80000000, 0, 0, 0
 };
 
-static __dma_data u32 __ctl_linkage_stack_dma[8] __aligned(64) = {
+static __amode31_data u32 __ctl_linkage_stack_amode31[8] __aligned(64) = {
 	0, 0, 0x89000000, 0,
 	0, 0, 0x8a000000, 0
 };
 
-static u64 __dma_ref *__ctl_aste = __ctl_aste_dma;
-static u32 __dma_ref *__ctl_duald = __ctl_duald_dma;
-static u32 __dma_ref *__ctl_linkage_stack = __ctl_linkage_stack_dma;
-static u32 __dma_ref *__ctl_duct = __ctl_duct_dma;
+static u64 __amode31_ref *__ctl_aste = __ctl_aste_amode31;
+static u32 __amode31_ref *__ctl_duald = __ctl_duald_amode31;
+static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
+static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
 
 int __bootdata(noexec_disabled);
 unsigned long __bootdata(ident_map_size);
@@ -814,31 +814,31 @@ static void __init setup_memory(void)
 	memblock_enforce_memory_limit(memblock_end_of_DRAM());
 }
 
-static void __init relocate_dma_section(void)
+static void __init relocate_amode31_section(void)
 {
-	unsigned long dma_addr, dma_size;
-	long dma_offset;
+	unsigned long amode31_addr, amode31_size;
+	long amode31_offset;
 	long *ptr;
 
-	/* Allocate a new DMA capable memory region */
-	dma_size = __edma - __sdma;
-	pr_info("Relocating DMA section of size 0x%08lx\n", dma_size);
-	dma_addr = (unsigned long)memblock_alloc_low(dma_size, PAGE_SIZE);
-	if (!dma_addr)
-		panic("Failed to allocate memory for DMA section\n");
-	dma_offset = dma_addr - __sdma;
-
-	/* Move original DMA section to the new one */
-	memmove((void *)dma_addr, (void *)__sdma, dma_size);
-	/* Zero out the old DMA section to catch invalid accesses within it */
-	memset((void *)__sdma, 0, dma_size);
-
-	/* Update all DMA region references */
-	for (ptr = _start_dma_refs; ptr != _end_dma_refs; ptr++)
-		*ptr += dma_offset;
+	/* Allocate a new AMODE31 capable memory region */
+	amode31_size = __eamode31 - __samode31;
+	pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
+	amode31_addr = (unsigned long)memblock_alloc_low(amode31_size, PAGE_SIZE);
+	if (!amode31_addr)
+		panic("Failed to allocate memory for AMODE31 section\n");
+	amode31_offset = amode31_addr - __samode31;
+
+	/* Move original AMODE31 section to the new one */
+	memmove((void *)amode31_addr, (void *)__samode31, amode31_size);
+	/* Zero out the old AMODE31 section to catch invalid accesses within it */
+	memset((void *)__samode31, 0, amode31_size);
+
+	/* Update all AMODE31 region references */
+	for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
+		*ptr += amode31_offset;
 }
 
-/* This must be called after DMA relocation */
+/* This must be called after AMODE31 relocation */
 static void __init setup_cr(void)
 {
 	union ctlreg2 cr2;
@@ -1002,7 +1002,7 @@ void __init setup_arch(char **cmdline_p)
 	free_mem_detect_info();
 
-	relocate_dma_section();
+	relocate_amode31_section();
 	setup_cr();
 	setup_uv();
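relocate_amode31_section() above is the whole mechanism: copy the section to a low address, poison the old copy, then patch every slot registered in .amode31.refs by the move delta. A standalone user-space model of that fixup, with all names invented and assuming sizeof(long) == sizeof(void *) just like the kernel's long-typed refs array:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char old_sec[64] = "below-2GB payload";
	char *new_sec = malloc(sizeof(old_sec));
	char *ref = old_sec + 10;	/* a pointer into the old section */
	long delta;

	if (!new_sec)
		return 1;
	memcpy(new_sec, old_sec, sizeof(old_sec));	/* memmove() in the patch */
	memset(old_sec, 0, sizeof(old_sec));		/* catch stale accesses */
	delta = (long)(new_sec - old_sec);

	/* The patch walks _start_amode31_refs.._end_amode31_refs doing this: */
	*(long *)&ref += delta;

	printf("%s\n", ref);	/* prints "payload": ref now points into new_sec */
	free(new_sec);
	return 0;
}

In the kernel, the same loop also keeps the __amode31_ref control-register table pointers in setup.c valid, which is why setup_cr() may only run after the relocation.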
@@ -709,7 +709,7 @@ void __init smp_save_dump_cpus(void)
 		smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
 	}
 	memblock_free(page, PAGE_SIZE);
-	diag_dma_ops.diag308_reset();
+	diag_amode31_ops.diag308_reset();
 	pcpu_set_smt(0);
 }
 #endif /* CONFIG_CRASH_DUMP */
@@ -9,14 +9,14 @@
 #include <asm/errno.h>
 #include <asm/sigp.h>
 
-	.section .dma.text,"ax"
+	.section .amode31.text,"ax"
 /*
  * Simplified version of expoline thunk. The normal thunks can not be used here,
  * because they might be more than 2 GB away, and not reachable by the relative
  * branch. No comdat, exrl, etc. optimizations used here, because it only
  * affects a few functions that are not performance-relevant.
 */
-	.macro BR_EX_DMA_r14
+	.macro BR_EX_AMODE31_r14
 	larl	%r1,0f
 	ex	0,0(%r1)
 	j	.
@@ -24,9 +24,9 @@
 	.endm
 
 /*
- * int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode)
+ * int _diag14_amode31(unsigned long rx, unsigned long ry1, unsigned long subcode)
 */
-ENTRY(_diag14_dma)
+ENTRY(_diag14_amode31)
 	lgr	%r1,%r2
 	lgr	%r2,%r3
 	lgr	%r3,%r4
@@ -39,14 +39,14 @@ ENTRY(_diag14_dma)
 .Ldiag14_fault:
 	sam64
 	lgfr	%r2,%r5
-	BR_EX_DMA_r14
-	EX_TABLE_DMA(.Ldiag14_ex, .Ldiag14_fault)
-ENDPROC(_diag14_dma)
+	BR_EX_AMODE31_r14
+	EX_TABLE_AMODE31(.Ldiag14_ex, .Ldiag14_fault)
+ENDPROC(_diag14_amode31)
 
 /*
- * int _diag210_dma(struct diag210 *addr)
+ * int _diag210_amode31(struct diag210 *addr)
 */
-ENTRY(_diag210_dma)
+ENTRY(_diag210_amode31)
 	lgr	%r1,%r2
 	lhi	%r2,-1
 	sam31
@@ -57,40 +57,40 @@ ENTRY(_diag210_dma)
 .Ldiag210_fault:
 	sam64
 	lgfr	%r2,%r2
-	BR_EX_DMA_r14
-	EX_TABLE_DMA(.Ldiag210_ex, .Ldiag210_fault)
-ENDPROC(_diag210_dma)
+	BR_EX_AMODE31_r14
+	EX_TABLE_AMODE31(.Ldiag210_ex, .Ldiag210_fault)
+ENDPROC(_diag210_amode31)
 
 /*
- * int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode)
+ * int _diag26c_amode31(void *req, void *resp, enum diag26c_sc subcode)
 */
-ENTRY(_diag26c_dma)
+ENTRY(_diag26c_amode31)
 	lghi	%r5,-EOPNOTSUPP
 	sam31
 	diag	%r2,%r4,0x26c
 .Ldiag26c_ex:
 	sam64
 	lgfr	%r2,%r5
-	BR_EX_DMA_r14
-	EX_TABLE_DMA(.Ldiag26c_ex, .Ldiag26c_ex)
-ENDPROC(_diag26c_dma)
+	BR_EX_AMODE31_r14
+	EX_TABLE_AMODE31(.Ldiag26c_ex, .Ldiag26c_ex)
+ENDPROC(_diag26c_amode31)
 
 /*
- * void _diag0c_dma(struct hypfs_diag0c_entry *entry)
+ * void _diag0c_amode31(struct hypfs_diag0c_entry *entry)
 */
-ENTRY(_diag0c_dma)
+ENTRY(_diag0c_amode31)
 	sam31
 	diag	%r2,%r2,0x0c
 	sam64
-	BR_EX_DMA_r14
-ENDPROC(_diag0c_dma)
+	BR_EX_AMODE31_r14
+ENDPROC(_diag0c_amode31)
 
 /*
- * void _diag308_reset_dma(void)
+ * void _diag308_reset_amode31(void)
 *
 * Calls diag 308 subcode 1 and continues execution
 */
-ENTRY(_diag308_reset_dma)
+ENTRY(_diag308_reset_amode31)
 	larl	%r4,.Lctlregs		# Save control registers
 	stctg	%c0,%c15,0(%r4)
 	lg	%r2,0(%r4)		# Disable lowcore protection
@@ -131,10 +131,10 @@ restart_part2:
 	stg	%r2,8(%r4)
 	lpswe	0(%r4)
 .Lcontinue:
-	BR_EX_DMA_r14
-ENDPROC(_diag308_reset_dma)
+	BR_EX_AMODE31_r14
+ENDPROC(_diag308_reset_amode31)
 
-	.section .dma.data,"aw",@progbits
+	.section .amode31.data,"aw",@progbits
 .align 8
 .Lrestart_diag308_psw:
 	.long	0x00080000,0x80000000
@@ -291,7 +291,7 @@ static void __init test_monitor_call(void)
 void __init trap_init(void)
 {
-	sort_extable(__start_dma_ex_table, __stop_dma_ex_table);
+	sort_extable(__start_amode31_ex_table, __stop_amode31_ex_table);
 	local_mcck_enable();
 	test_monitor_call();
 }
@@ -74,10 +74,10 @@ SECTIONS
 	BOOT_DATA_PRESERVED
 
 	. = ALIGN(8);
-	.dma.refs : {
-		_start_dma_refs = .;
-		*(.dma.refs)
-		_end_dma_refs = .;
+	.amod31.refs : {
+		_start_amode31_refs = .;
+		*(.amode31.refs)
+		_end_amode31_refs = .;
 	}
 
 	_edata = .;		/* End of data section */
@@ -146,30 +146,30 @@ SECTIONS
 	BOOT_DATA
 
 	/*
-	 * .dma section for code, data, ex_table that need to stay below 2 GB,
-	 * even when the kernel is relocated above 2 GB.
+	 * .amode31 section for code, data, ex_table that need to stay
+	 * below 2 GB, even when the kernel is relocated above 2 GB.
 	 */
 	. = ALIGN(PAGE_SIZE);
-	_sdma = .;
-	.dma.text : {
-		_stext_dma = .;
-		*(.dma.text)
-		*(.dma.text.*_indirect_*)
+	_samode31 = .;
+	.amode31.text : {
+		_stext_amode31 = .;
+		*(.amode31.text)
+		*(.amode31.text.*_indirect_*)
 		. = ALIGN(PAGE_SIZE);
-		_etext_dma = .;
+		_etext_amode31 = .;
 	}
 
 	. = ALIGN(16);
-	.dma.ex_table : {
-		_start_dma_ex_table = .;
-		KEEP(*(.dma.ex_table))
-		_stop_dma_ex_table = .;
+	.amode31.ex_table : {
+		_start_amode31_ex_table = .;
+		KEEP(*(.amode31.ex_table))
+		_stop_amode31_ex_table = .;
 	}
 
 	. = ALIGN(PAGE_SIZE);
-	.dma.data : {
-		*(.dma.data)
+	.amode31.data : {
+		*(.amode31.data)
 	}
 	. = ALIGN(PAGE_SIZE);
-	_edma = .;
+	_eamode31 = .;
 
 	/* early.c uses stsi, which requires page aligned data. */
 	. = ALIGN(PAGE_SIZE);
@@ -231,8 +231,8 @@ const struct exception_table_entry *s390_search_extables(unsigned long addr)
 {
 	const struct exception_table_entry *fixup;
 
-	fixup = search_extable(__start_dma_ex_table,
-			       __stop_dma_ex_table - __start_dma_ex_table,
+	fixup = search_extable(__start_amode31_ex_table,
+			       __stop_amode31_ex_table - __start_amode31_ex_table,
 			       addr);
 	if (!fixup)
 		fixup = search_exception_tables(addr);
@@ -581,7 +581,7 @@ void __init vmem_map_init(void)
 	__set_memory((unsigned long)_sinittext,
 		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
 		     SET_MEMORY_RO | SET_MEMORY_X);
-	__set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
+	__set_memory(__stext_amode31, (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT,
 		     SET_MEMORY_RO | SET_MEMORY_X);
 
 	/* we need lowcore executable for our LPSWE instructions */