Commit 0f06c063 authored by Tejun Heo

alpha: use L1_CACHE_BYTES for cacheline size in the linker script

Currently the linker script uses 64 for cacheline size which isn't
optimal for all cases.  Include asm/cache.h and use L1_CACHE_BYTES
instead as suggested by Sam Ravnborg.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Sam Ravnborg <sam@ravnborg.org>
parent 19df0c2f
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -1,5 +1,6 @@
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/thread_info.h>
+#include <asm/cache.h>
 #include <asm/page.h>

 OUTPUT_FORMAT("elf64-alpha")
@@ -38,7 +39,7 @@ SECTIONS
 	__init_begin = ALIGN(PAGE_SIZE);
 	INIT_TEXT_SECTION(PAGE_SIZE)
 	INIT_DATA_SECTION(16)
-	PERCPU(64, PAGE_SIZE)
+	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)

 	/* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page
 	   needed for the THREAD_SIZE aligned init_task gets freed after init */
 	. = ALIGN(THREAD_SIZE);
@@ -46,7 +47,7 @@ SECTIONS
 	/* Freed after init ends here */

 	_data = .;
-	RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
+	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)

 	.got : {
 		*(.got)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment