nexedi / linux

Commit 01fed931, authored Nov 10, 2007 by Paul Mundt
Parent: 9b01bd9e

sh: Consolidate slab/kmalloc minalign values.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Showing 2 changed files with 14 additions and 12 deletions:

 include/asm-sh/page.h        +14  -0
 include/asm-sh/uaccess_64.h   +0 -12
include/asm-sh/page.h
@@ -157,8 +157,22 @@ typedef struct { unsigned long pgd; } pgd_t;
  * Slub defaults to 8-byte alignment, we're only interested in 4.
  * Slab defaults to BYTES_PER_WORD, which ends up being the same anyways.
  */
+#ifdef CONFIG_SUPERH32
 #define ARCH_KMALLOC_MINALIGN	4
 #define ARCH_SLAB_MINALIGN	4
+#else
+/* If gcc inlines memset, it will use st.q instructions. Therefore, we need
+   kmalloc allocations to be 8-byte aligned. Without this, the alignment
+   becomes BYTE_PER_WORD i.e. only 4 (since sizeof(long)==sizeof(void*)==4 on
+   sh64 at the moment). */
+#define ARCH_KMALLOC_MINALIGN	8
+
+/*
+ * We want 8-byte alignment for the slab caches as well, otherwise we have
+ * the same BYTES_PER_WORD (sizeof(void *)) min align in kmem_cache_create().
+ */
+#define ARCH_SLAB_MINALIGN	8
+#endif
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_PAGE_H */
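The comment in the new sh64 branch states the constraint: if gcc inlines memset(), it may emit st.q (8-byte) stores, so kmalloc() results must be 8-byte aligned. Below is a minimal userspace sketch of that invariant, not part of this patch: aligned_alloc() stands in for kmalloc(), and the macro values mirror the hunk above.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#ifdef CONFIG_SUPERH32
#define ARCH_KMALLOC_MINALIGN 4	/* 32-bit parts: word alignment suffices */
#else
#define ARCH_KMALLOC_MINALIGN 8	/* sh64: st.q stores 8 bytes at a time */
#endif

int main(void)
{
	/* aligned_alloc() is a stand-in for kmalloc() in this sketch */
	void *p = aligned_alloc(ARCH_KMALLOC_MINALIGN, 64);

	assert(p != NULL);
	/* an 8-byte store to p (st.q on sh64) is only safe if this holds */
	assert(((uintptr_t)p % ARCH_KMALLOC_MINALIGN) == 0);
	free(p);
	return 0;
}

On CONFIG_SUPERH32 the same check passes at 4-byte alignment, which is why the 32-bit branch keeps the smaller value.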
include/asm-sh/uaccess_64.h
@@ -297,18 +297,6 @@ struct exception_table_entry
 
 #define ARCH_HAS_SEARCH_EXTABLE
 
-/* If gcc inlines memset, it will use st.q instructions. Therefore, we need
-   kmalloc allocations to be 8-byte aligned. Without this, the alignment
-   becomes BYTE_PER_WORD i.e. only 4 (since sizeof(long)==sizeof(void*)==4 on
-   sh64 at the moment). */
-#define ARCH_KMALLOC_MINALIGN 8
-
-/*
- * We want 8-byte alignment for the slab caches as well, otherwise we have
- * the same BYTES_PER_WORD (sizeof(void *)) min align in kmem_cache_create().
- */
-#define ARCH_SLAB_MINALIGN 8
-
 /* Returns 0 if exception not found and fixup.unit otherwise. */
 extern unsigned long search_exception_table(unsigned long addr);
 extern const struct exception_table_entry *search_exception_tables(unsigned long addr);
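The declarations left in place return 0 when no entry matches the faulting address and a fixup otherwise. For context, a conventional way to implement such a lookup is a binary search over entries sorted by instruction address; the sketch below is a hypothetical illustration under that assumption, with struct ex_entry as a simplified stand-in for the kernel's struct exception_table_entry (whose sh64 version returns fixup.unit).

#include <stddef.h>

struct ex_entry {			/* simplified stand-in */
	unsigned long insn;		/* address of the faulting instruction */
	unsigned long fixup;		/* address to resume execution at */
};

/* returns 0 if addr is not in the table, the fixup address otherwise */
static unsigned long ex_search(const struct ex_entry *tbl, size_t n,
			       unsigned long addr)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (tbl[mid].insn == addr)
			return tbl[mid].fixup;
		if (tbl[mid].insn < addr)
			lo = mid + 1;
		else
			hi = mid;
	}
	return 0;
}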