Commit 4cad6719 authored by Linus Torvalds


Merge tag 'asm-generic-unaligned-5.14' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic

Pull asm/unaligned.h unification from Arnd Bergmann:
 "Unify asm/unaligned.h around struct helper

  The get_unaligned()/put_unaligned() helpers are traditionally
  architecture specific, with the two main variants being the
  "access-ok.h" version that assumes unaligned pointer accesses always
  work on a particular architecture, and the "le-struct.h" version that
  casts the data to a byte aligned type before dereferencing, for
  architectures that cannot always do unaligned accesses in hardware.

  Based on the discussion linked below, it appears that the access-ok
   version is not reliable on any architecture, but the struct version
  probably has no downsides. This series changes the code to use the
  same implementation on all architectures, addressing the few
  exceptions separately"

Link: https://lore.kernel.org/lkml/75d07691-1e4f-741f-9852-38c0b4f520bc@synopsys.com/
Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100363
Link: https://lore.kernel.org/lkml/20210507220813.365382-14-arnd@kernel.org/
Link: git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic.git unaligned-rework-v2
Link: https://lore.kernel.org/lkml/CAHk-=whGObOKruA_bU3aPGZfoDqZM1_9wBkwREp0H0FgR-90uQ@mail.gmail.com/
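
For orientation, the two helper styles described above can be sketched in plain C as
below. This is only an illustrative, freestanding sketch (the function names and the
demo buffer are made up for this example, not kernel identifiers); the kernel's actual
unified implementation is the __get_unaligned_t()/__put_unaligned_t() packed-struct
helpers visible in the asm-generic/unaligned.h hunk further down in this diff.

```c
/* Illustrative sketch only -- not the kernel's code. */
#include <stdint.h>
#include <stdio.h>

/*
 * "access-ok" style: cast and dereference, assuming the CPU tolerates
 * misaligned loads.  This is the variant the series moves away from.
 */
static inline uint32_t get_unaligned_deref32(const void *p)
{
	return *(const uint32_t *)p;	/* may trap or be slow without HW support */
}

/*
 * "struct" style: a packed wrapper type forces the compiler to emit
 * accesses that are safe regardless of the pointer's alignment.
 */
static inline uint32_t get_unaligned_struct32(const void *p)
{
	const struct { uint32_t x; } __attribute__((packed)) *pp = p;
	return pp->x;
}

int main(void)
{
	uint8_t buf[8] = { 0x00, 0x78, 0x56, 0x34, 0x12 };

	/* Both read a 32-bit value from the misaligned address buf + 1. */
	printf("0x%x 0x%x\n",
	       get_unaligned_deref32(buf + 1),
	       get_unaligned_struct32(buf + 1));
	return 0;
}
```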

* tag 'asm-generic-unaligned-5.14' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic:
  asm-generic: simplify asm/unaligned.h
  asm-generic: uaccess: 1-byte access is always aligned
  netpoll: avoid put_unaligned() on single character
  mwifiex: re-fix for unaligned accesses
  apparmor: use get_unaligned() only for multi-byte words
  partitions: msdos: fix one-byte get_unaligned()
  asm-generic: unaligned always use struct helpers
  asm-generic: unaligned: remove byteshift helpers
  powerpc: use linux/unaligned/le_struct.h on LE power7
  m68k: select CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
  sh: remove unaligned access for sh4a
  openrisc: always use unaligned-struct header
  asm-generic: use asm-generic/unaligned.h for most architectures
parents 40625491 803f4e1e
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ALPHA_UNALIGNED_H
#define _ASM_ALPHA_UNALIGNED_H
#include <linux/unaligned/le_struct.h>
#include <linux/unaligned/be_byteshift.h>
#include <linux/unaligned/generic.h>
#define get_unaligned __get_unaligned_le
#define put_unaligned __put_unaligned_le
#endif /* _ASM_ALPHA_UNALIGNED_H */
#ifndef __ASM_ARM_UNALIGNED_H
#define __ASM_ARM_UNALIGNED_H
/*
* We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+,
* but we don't want to use linux/unaligned/access_ok.h since that can lead
* to traps on unaligned stm/ldm or strd/ldrd.
*/
#include <asm/byteorder.h>
#if defined(__LITTLE_ENDIAN)
# include <linux/unaligned/le_struct.h>
# include <linux/unaligned/be_byteshift.h>
# include <linux/unaligned/generic.h>
# define get_unaligned __get_unaligned_le
# define put_unaligned __put_unaligned_le
#elif defined(__BIG_ENDIAN)
# include <linux/unaligned/be_struct.h>
# include <linux/unaligned/le_byteshift.h>
# include <linux/unaligned/generic.h>
# define get_unaligned __get_unaligned_be
# define put_unaligned __put_unaligned_be
#else
# error need to define endianess
#endif
#endif /* __ASM_ARM_UNALIGNED_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_UNALIGNED_H
#define _ASM_IA64_UNALIGNED_H
#include <linux/unaligned/le_struct.h>
#include <linux/unaligned/be_byteshift.h>
#include <linux/unaligned/generic.h>
#define get_unaligned __get_unaligned_le
#define put_unaligned __put_unaligned_le
#endif /* _ASM_IA64_UNALIGNED_H */
@@ -21,6 +21,7 @@ config M68K
select HAVE_AOUT if MMU
select HAVE_ASM_MODVERSIONS
select HAVE_DEBUG_BUGVERBOSE
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !CPU_HAS_NO_UNALIGNED
select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
select HAVE_IDE
select HAVE_MOD_ARCH_SPECIFIC
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_M68K_UNALIGNED_H
#define _ASM_M68K_UNALIGNED_H
#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
#include <linux/unaligned/be_struct.h>
#include <linux/unaligned/le_byteshift.h>
#include <linux/unaligned/generic.h>
#define get_unaligned __get_unaligned_be
#define put_unaligned __put_unaligned_be
#else
/*
* The m68k can do unaligned accesses itself.
*/
#include <linux/unaligned/access_ok.h>
#include <linux/unaligned/generic.h>
#define get_unaligned __get_unaligned_be
#define put_unaligned __put_unaligned_be
#endif
#endif /* _ASM_M68K_UNALIGNED_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2008 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2006 Atmark Techno, Inc.
*/
#ifndef _ASM_MICROBLAZE_UNALIGNED_H
#define _ASM_MICROBLAZE_UNALIGNED_H
# ifdef __KERNEL__
# ifdef __MICROBLAZEEL__
# include <linux/unaligned/le_struct.h>
# include <linux/unaligned/be_byteshift.h>
# define get_unaligned __get_unaligned_le
# define put_unaligned __put_unaligned_le
# else
# include <linux/unaligned/be_struct.h>
# include <linux/unaligned/le_byteshift.h>
# define get_unaligned __get_unaligned_be
# define put_unaligned __put_unaligned_be
# endif
# include <linux/unaligned/generic.h>
# endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_UNALIGNED_H */
@@ -8,13 +8,13 @@
* Copyright (C) 2018 MIPS Tech, LLC
*/
#include <linux/unaligned/access_ok.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <asm/mipsregs.h>
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* OpenRISC Linux
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* OpenRISC implementation:
* Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
* et al.
*/
#ifndef __ASM_OPENRISC_UNALIGNED_H
#define __ASM_OPENRISC_UNALIGNED_H
/*
* This is copied from the generic implementation and the C-struct
* variant replaced with the memmove variant. The GCC compiler
* for the OR32 arch optimizes too aggressively for the C-struct
* variant to work, so use the memmove variant instead.
*
* It may be worth considering implementing the unaligned access
* exception handler and allowing unaligned accesses (access_ok.h)...
* not sure if it would be much of a performance win without further
* investigation.
*/
#include <asm/byteorder.h>
#if defined(__LITTLE_ENDIAN)
# include <linux/unaligned/le_memmove.h>
# include <linux/unaligned/be_byteshift.h>
# include <linux/unaligned/generic.h>
# define get_unaligned __get_unaligned_le
# define put_unaligned __put_unaligned_le
#elif defined(__BIG_ENDIAN)
# include <linux/unaligned/be_memmove.h>
# include <linux/unaligned/le_byteshift.h>
# include <linux/unaligned/generic.h>
# define get_unaligned __get_unaligned_be
# define put_unaligned __put_unaligned_be
#else
# error need to define endianess
#endif
#endif /* __ASM_OPENRISC_UNALIGNED_H */
@@ -2,11 +2,7 @@
#ifndef _ASM_PARISC_UNALIGNED_H
#define _ASM_PARISC_UNALIGNED_H
#include <linux/unaligned/be_struct.h>
#include <linux/unaligned/le_byteshift.h>
#include <linux/unaligned/generic.h>
#define get_unaligned __get_unaligned_be
#define put_unaligned __put_unaligned_be
#include <asm-generic/unaligned.h>
#ifdef __KERNEL__
struct pt_regs;
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_UNALIGNED_H
#define _ASM_POWERPC_UNALIGNED_H
#ifdef __KERNEL__
/*
* The PowerPC can do unaligned accesses itself based on its endian mode.
*/
#include <linux/unaligned/access_ok.h>
#include <linux/unaligned/generic.h>
#ifdef __LITTLE_ENDIAN__
#define get_unaligned __get_unaligned_le
#define put_unaligned __put_unaligned_le
#else
#define get_unaligned __get_unaligned_be
#define put_unaligned __put_unaligned_be
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_UNALIGNED_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_UNALIGNED_SH4A_H
#define __ASM_SH_UNALIGNED_SH4A_H
/*
* SH-4A has support for unaligned 32-bit loads, and 32-bit loads only.
* Support for 64-bit accesses are done through shifting and masking
* relative to the endianness. Unaligned stores are not supported by the
* instruction encoding, so these continue to use the packed
* struct.
*
* The same note as with the movli.l/movco.l pair applies here, as long
* as the load is guaranteed to be inlined, nothing else will hook in to
* r0 and we get the return value for free.
*
* NOTE: Due to the fact we require r0 encoding, care should be taken to
* avoid mixing these heavily with other r0 consumers, such as the atomic
* ops. Failure to adhere to this can result in the compiler running out
* of spill registers and blowing up when building at low optimization
* levels. See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34777.
*/
#include <linux/unaligned/packed_struct.h>
#include <linux/types.h>
#include <asm/byteorder.h>
static inline u16 sh4a_get_unaligned_cpu16(const u8 *p)
{
#ifdef __LITTLE_ENDIAN
return p[0] | p[1] << 8;
#else
return p[0] << 8 | p[1];
#endif
}
static __always_inline u32 sh4a_get_unaligned_cpu32(const u8 *p)
{
unsigned long unaligned;
__asm__ __volatile__ (
"movua.l @%1, %0\n\t"
: "=z" (unaligned)
: "r" (p)
);
return unaligned;
}
/*
* Even though movua.l supports auto-increment on the read side, it can
* only store to r0 due to instruction encoding constraints, so just let
* the compiler sort it out on its own.
*/
static inline u64 sh4a_get_unaligned_cpu64(const u8 *p)
{
#ifdef __LITTLE_ENDIAN
return (u64)sh4a_get_unaligned_cpu32(p + 4) << 32 |
sh4a_get_unaligned_cpu32(p);
#else
return (u64)sh4a_get_unaligned_cpu32(p) << 32 |
sh4a_get_unaligned_cpu32(p + 4);
#endif
}
static inline u16 get_unaligned_le16(const void *p)
{
return le16_to_cpu(sh4a_get_unaligned_cpu16(p));
}
static inline u32 get_unaligned_le32(const void *p)
{
return le32_to_cpu(sh4a_get_unaligned_cpu32(p));
}
static inline u64 get_unaligned_le64(const void *p)
{
return le64_to_cpu(sh4a_get_unaligned_cpu64(p));
}
static inline u16 get_unaligned_be16(const void *p)
{
return be16_to_cpu(sh4a_get_unaligned_cpu16(p));
}
static inline u32 get_unaligned_be32(const void *p)
{
return be32_to_cpu(sh4a_get_unaligned_cpu32(p));
}
static inline u64 get_unaligned_be64(const void *p)
{
return be64_to_cpu(sh4a_get_unaligned_cpu64(p));
}
static inline void nonnative_put_le16(u16 val, u8 *p)
{
*p++ = val;
*p++ = val >> 8;
}
static inline void nonnative_put_le32(u32 val, u8 *p)
{
nonnative_put_le16(val, p);
nonnative_put_le16(val >> 16, p + 2);
}
static inline void nonnative_put_le64(u64 val, u8 *p)
{
nonnative_put_le32(val, p);
nonnative_put_le32(val >> 32, p + 4);
}
static inline void nonnative_put_be16(u16 val, u8 *p)
{
*p++ = val >> 8;
*p++ = val;
}
static inline void nonnative_put_be32(u32 val, u8 *p)
{
nonnative_put_be16(val >> 16, p);
nonnative_put_be16(val, p + 2);
}
static inline void nonnative_put_be64(u64 val, u8 *p)
{
nonnative_put_be32(val >> 32, p);
nonnative_put_be32(val, p + 4);
}
static inline void put_unaligned_le16(u16 val, void *p)
{
#ifdef __LITTLE_ENDIAN
__put_unaligned_cpu16(val, p);
#else
nonnative_put_le16(val, p);
#endif
}
static inline void put_unaligned_le32(u32 val, void *p)
{
#ifdef __LITTLE_ENDIAN
__put_unaligned_cpu32(val, p);
#else
nonnative_put_le32(val, p);
#endif
}
static inline void put_unaligned_le64(u64 val, void *p)
{
#ifdef __LITTLE_ENDIAN
__put_unaligned_cpu64(val, p);
#else
nonnative_put_le64(val, p);
#endif
}
static inline void put_unaligned_be16(u16 val, void *p)
{
#ifdef __BIG_ENDIAN
__put_unaligned_cpu16(val, p);
#else
nonnative_put_be16(val, p);
#endif
}
static inline void put_unaligned_be32(u32 val, void *p)
{
#ifdef __BIG_ENDIAN
__put_unaligned_cpu32(val, p);
#else
nonnative_put_be32(val, p);
#endif
}
static inline void put_unaligned_be64(u64 val, void *p)
{
#ifdef __BIG_ENDIAN
__put_unaligned_cpu64(val, p);
#else
nonnative_put_be64(val, p);
#endif
}
/*
* While it's a bit non-obvious, even though the generic le/be wrappers
* use the __get/put_xxx prefixing, they actually wrap in to the
* non-prefixed get/put_xxx variants as provided above.
*/
#include <linux/unaligned/generic.h>
#ifdef __LITTLE_ENDIAN
# define get_unaligned __get_unaligned_le
# define put_unaligned __put_unaligned_le
#else
# define get_unaligned __get_unaligned_be
# define put_unaligned __put_unaligned_be
#endif
#endif /* __ASM_SH_UNALIGNED_SH4A_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SH_UNALIGNED_H
#define _ASM_SH_UNALIGNED_H
#ifdef CONFIG_CPU_SH4A
/* SH-4A can handle unaligned loads in a relatively neutered fashion. */
#include <asm/unaligned-sh4a.h>
#else
/* Otherwise, SH can't handle unaligned accesses. */
#include <asm-generic/unaligned.h>
#endif
#endif /* _ASM_SH_UNALIGNED_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SPARC_UNALIGNED_H
#define _ASM_SPARC_UNALIGNED_H
#include <linux/unaligned/be_struct.h>
#include <linux/unaligned/le_byteshift.h>
#include <linux/unaligned/generic.h>
#define get_unaligned __get_unaligned_be
#define put_unaligned __put_unaligned_be
#endif /* _ASM_SPARC_UNALIGNED_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UNALIGNED_H
#define _ASM_X86_UNALIGNED_H
/*
* The x86 can do unaligned accesses itself.
*/
#include <linux/unaligned/access_ok.h>
#include <linux/unaligned/generic.h>
#define get_unaligned __get_unaligned_le
#define put_unaligned __put_unaligned_le
#endif /* _ASM_X86_UNALIGNED_H */
/*
* Xtensa doesn't handle unaligned accesses efficiently.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _ASM_XTENSA_UNALIGNED_H
#define _ASM_XTENSA_UNALIGNED_H
#include <asm/byteorder.h>
#ifdef __LITTLE_ENDIAN
# include <linux/unaligned/le_struct.h>
# include <linux/unaligned/be_byteshift.h>
# include <linux/unaligned/generic.h>
# define get_unaligned __get_unaligned_le
# define put_unaligned __put_unaligned_le
#else
# include <linux/unaligned/be_struct.h>
# include <linux/unaligned/le_byteshift.h>
# include <linux/unaligned/generic.h>
# define get_unaligned __get_unaligned_be
# define put_unaligned __put_unaligned_be
#endif
#endif /* _ASM_XTENSA_UNALIGNED_H */
@@ -510,7 +510,7 @@ static bool ldm_validate_partition_table(struct parsed_partitions *state)
p = (struct msdos_partition *)(data + 0x01BE);
for (i = 0; i < 4; i++, p++)
if (SYS_IND (p) == LDM_PARTITION) {
if (p->sys_ind == LDM_PARTITION) {
result = true;
break;
}
@@ -84,9 +84,6 @@ struct parsed_partitions;
#define TOC_BITMAP1 "config" /* Names of the two defined */
#define TOC_BITMAP2 "log" /* bitmaps in the TOCBLOCK. */
/* Borrowed from msdos.c */
#define SYS_IND(p) (get_unaligned(&(p)->sys_ind))
struct frag { /* VBLK Fragment handling */
struct list_head list;
u32 group;
@@ -38,8 +38,6 @@
*/
#include <asm/unaligned.h>
#define SYS_IND(p) get_unaligned(&p->sys_ind)
static inline sector_t nr_sects(struct msdos_partition *p)
{
return (sector_t)get_unaligned_le32(&p->nr_sects);
@@ -52,9 +50,9 @@ static inline sector_t start_sect(struct msdos_partition *p)
static inline int is_extended_partition(struct msdos_partition *p)
{
return (SYS_IND(p) == DOS_EXTENDED_PARTITION ||
SYS_IND(p) == WIN98_EXTENDED_PARTITION ||
SYS_IND(p) == LINUX_EXTENDED_PARTITION);
return (p->sys_ind == DOS_EXTENDED_PARTITION ||
p->sys_ind == WIN98_EXTENDED_PARTITION ||
p->sys_ind == LINUX_EXTENDED_PARTITION);
}
#define MSDOS_LABEL_MAGIC1 0x55
@@ -193,7 +191,7 @@ static void parse_extended(struct parsed_partitions *state,
put_partition(state, state->next, next, size);
set_info(state, state->next, disksig);
if (SYS_IND(p) == LINUX_RAID_PARTITION)
if (p->sys_ind == LINUX_RAID_PARTITION)
state->parts[state->next].flags = ADDPART_FLAG_RAID;
loopct = 0;
if (++state->next == state->limit)
@@ -546,7 +544,7 @@ static void parse_minix(struct parsed_partitions *state,
* a secondary MBR describing its subpartitions, or
* the normal boot sector. */
if (msdos_magic_present(data + 510) &&
SYS_IND(p) == MINIX_PARTITION) { /* subpartition table present */
p->sys_ind == MINIX_PARTITION) { /* subpartition table present */
char tmp[1 + BDEVNAME_SIZE + 10 + 9 + 1];
snprintf(tmp, sizeof(tmp), " %s%d: <minix:", state->name, origin);
@@ -555,7 +553,7 @@
if (state->next == state->limit)
break;
/* add each partition in use */
if (SYS_IND(p) == MINIX_PARTITION)
if (p->sys_ind == MINIX_PARTITION)
put_partition(state, state->next++,
start_sect(p), nr_sects(p));
}
@@ -643,7 +641,7 @@ int msdos_partition(struct parsed_partitions *state)
p = (struct msdos_partition *) (data + 0x1be);
for (slot = 1 ; slot <= 4 ; slot++, p++) {
/* If this is an EFI GPT disk, msdos should ignore it. */
if (SYS_IND(p) == EFI_PMBR_OSTYPE_EFI_GPT) {
if (p->sys_ind == EFI_PMBR_OSTYPE_EFI_GPT) {
put_dev_sector(sect);
return 0;
}
@@ -685,11 +683,11 @@ int msdos_partition(struct parsed_partitions *state)
}
put_partition(state, slot, start, size);
set_info(state, slot, disksig);
if (SYS_IND(p) == LINUX_RAID_PARTITION)
if (p->sys_ind == LINUX_RAID_PARTITION)
state->parts[slot].flags = ADDPART_FLAG_RAID;
if (SYS_IND(p) == DM6_PARTITION)
if (p->sys_ind == DM6_PARTITION)
strlcat(state->pp_buf, "[DM]", PAGE_SIZE);
if (SYS_IND(p) == EZD_PARTITION)
if (p->sys_ind == EZD_PARTITION)
strlcat(state->pp_buf, "[EZD]", PAGE_SIZE);
}
@@ -698,7 +696,7 @@ int msdos_partition(struct parsed_partitions *state)
/* second pass - output for each on a separate line */
p = (struct msdos_partition *) (0x1be + data);
for (slot = 1 ; slot <= 4 ; slot++, p++) {
unsigned char id = SYS_IND(p);
unsigned char id = p->sys_ind;
int n;
if (!nr_sects(p))
@@ -1231,7 +1231,7 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
u32 tmp;
u32 *cookie;
card->sleep_cookie_vbase = dma_alloc_coherent(&card->dev->dev,
sizeof(u32),
@@ -1242,13 +1242,11 @@ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
"dma_alloc_coherent failed!\n");
return -ENOMEM;
}
cookie = (u32 *)card->sleep_cookie_vbase;
/* Init val of Sleep Cookie */
tmp = FW_AWAKE_COOKIE;
put_unaligned(tmp, card->sleep_cookie_vbase);
*cookie = FW_AWAKE_COOKIE;
mwifiex_dbg(adapter, INFO,
"alloc_scook: sleep cookie=0x%x\n",
get_unaligned(card->sleep_cookie_vbase));
mwifiex_dbg(adapter, INFO, "alloc_scook: sleep cookie=0x%x\n", *cookie);
return 0;
}
@@ -19,7 +19,7 @@ __get_user_fn(size_t size, const void __user *from, void *to)
switch (size) {
case 1:
*(u8 *)to = get_unaligned((u8 __force *)from);
*(u8 *)to = *((u8 __force *)from);
return 0;
case 2:
*(u16 *)to = get_unaligned((u16 __force *)from);
@@ -45,7 +45,7 @@ __put_user_fn(size_t size, void __user *to, void *from)
switch (size) {
case 1:
put_unaligned(*(u8 *)from, (u8 __force *)to);
*(u8 __force *)to = *(u8 *)from;
return 0;
case 2:
put_unaligned(*(u16 *)from, (u16 __force *)to);
@@ -6,31 +6,124 @@
* This is the most generic implementation of unaligned accesses
* and should work almost anywhere.
*/
#include <linux/unaligned/packed_struct.h>
#include <asm/byteorder.h>
/* Set by the arch if it can handle unaligned accesses in hardware. */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
# include <linux/unaligned/access_ok.h>
#endif
#if defined(__LITTLE_ENDIAN)
# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
# include <linux/unaligned/le_struct.h>
# include <linux/unaligned/be_byteshift.h>
# endif
# include <linux/unaligned/generic.h>
# define get_unaligned __get_unaligned_le
# define put_unaligned __put_unaligned_le
#elif defined(__BIG_ENDIAN)
# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
# include <linux/unaligned/be_struct.h>
# include <linux/unaligned/le_byteshift.h>
# endif
# include <linux/unaligned/generic.h>
# define get_unaligned __get_unaligned_be
# define put_unaligned __put_unaligned_be
#else
# error need to define endianess
#endif
#define __get_unaligned_t(type, ptr) ({ \
const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
__pptr->x; \
})
#define __put_unaligned_t(type, val, ptr) do { \
struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
__pptr->x = (val); \
} while (0)
#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))
static inline u16 get_unaligned_le16(const void *p)
{
return le16_to_cpu(__get_unaligned_t(__le16, p));
}
static inline u32 get_unaligned_le32(const void *p)
{
return le32_to_cpu(__get_unaligned_t(__le32, p));
}
static inline u64 get_unaligned_le64(const void *p)
{
return le64_to_cpu(__get_unaligned_t(__le64, p));
}
static inline void put_unaligned_le16(u16 val, void *p)
{
__put_unaligned_t(__le16, cpu_to_le16(val), p);
}
static inline void put_unaligned_le32(u32 val, void *p)
{
__put_unaligned_t(__le32, cpu_to_le32(val), p);
}
static inline void put_unaligned_le64(u64 val, void *p)
{
__put_unaligned_t(__le64, cpu_to_le64(val), p);
}
static inline u16 get_unaligned_be16(const void *p)
{
return be16_to_cpu(__get_unaligned_t(__be16, p));
}
static inline u32 get_unaligned_be32(const void *p)
{
return be32_to_cpu(__get_unaligned_t(__be32, p));
}
static inline u64 get_unaligned_be64(const void *p)
{
return be64_to_cpu(__get_unaligned_t(__be64, p));
}
static inline void put_unaligned_be16(u16 val, void *p)
{
__put_unaligned_t(__be16, cpu_to_be16(val), p);
}
static inline void put_unaligned_be32(u32 val, void *p)
{
__put_unaligned_t(__be32, cpu_to_be32(val), p);
}
static inline void put_unaligned_be64(u64 val, void *p)
{
__put_unaligned_t(__be64, cpu_to_be64(val), p);
}
static inline u32 __get_unaligned_be24(const u8 *p)
{
return p[0] << 16 | p[1] << 8 | p[2];
}
static inline u32 get_unaligned_be24(const void *p)
{
return __get_unaligned_be24(p);
}
static inline u32 __get_unaligned_le24(const u8 *p)
{
return p[0] | p[1] << 8 | p[2] << 16;
}
static inline u32 get_unaligned_le24(const void *p)
{
return __get_unaligned_le24(p);
}
static inline void __put_unaligned_be24(const u32 val, u8 *p)
{
*p++ = val >> 16;
*p++ = val >> 8;
*p++ = val;
}
static inline void put_unaligned_be24(const u32 val, void *p)
{
__put_unaligned_be24(val, p);
}
static inline void __put_unaligned_le24(const u32 val, u8 *p)
{
*p++ = val;
*p++ = val >> 8;
*p++ = val >> 16;
}
static inline void put_unaligned_le24(const u32 val, void *p)
{
__put_unaligned_le24(val, p);
}
#endif /* __ASM_GENERIC_UNALIGNED_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_UNALIGNED_ACCESS_OK_H
#define _LINUX_UNALIGNED_ACCESS_OK_H
#include <linux/kernel.h>
#include <asm/byteorder.h>
static __always_inline u16 get_unaligned_le16(const void *p)
{
return le16_to_cpup((__le16 *)p);
}
static __always_inline u32 get_unaligned_le32(const void *p)
{
return le32_to_cpup((__le32 *)p);
}
static __always_inline u64 get_unaligned_le64(const void *p)
{
return le64_to_cpup((__le64 *)p);
}
static __always_inline u16 get_unaligned_be16(const void *p)
{
return be16_to_cpup((__be16 *)p);
}
static __always_inline u32 get_unaligned_be32(const void *p)
{
return be32_to_cpup((__be32 *)p);
}
static __always_inline u64 get_unaligned_be64(const void *p)
{
return be64_to_cpup((__be64 *)p);
}
static __always_inline void put_unaligned_le16(u16 val, void *p)
{
*((__le16 *)p) = cpu_to_le16(val);
}
static __always_inline void put_unaligned_le32(u32 val, void *p)
{
*((__le32 *)p) = cpu_to_le32(val);
}
static __always_inline void put_unaligned_le64(u64 val, void *p)
{
*((__le64 *)p) = cpu_to_le64(val);
}
static __always_inline void put_unaligned_be16(u16 val, void *p)
{
*((__be16 *)p) = cpu_to_be16(val);
}
static __always_inline void put_unaligned_be32(u32 val, void *p)
{
*((__be32 *)p) = cpu_to_be32(val);
}
static __always_inline void put_unaligned_be64(u64 val, void *p)
{
*((__be64 *)p) = cpu_to_be64(val);
}
#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H
#define _LINUX_UNALIGNED_BE_BYTESHIFT_H
#include <linux/types.h>
static inline u16 __get_unaligned_be16(const u8 *p)
{
return p[0] << 8 | p[1];
}
static inline u32 __get_unaligned_be32(const u8 *p)
{
return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}
static inline u64 __get_unaligned_be64(const u8 *p)
{
return (u64)__get_unaligned_be32(p) << 32 |
__get_unaligned_be32(p + 4);
}
static inline void __put_unaligned_be16(u16 val, u8 *p)
{
*p++ = val >> 8;
*p++ = val;
}
static inline void __put_unaligned_be32(u32 val, u8 *p)
{
__put_unaligned_be16(val >> 16, p);
__put_unaligned_be16(val, p + 2);
}
static inline void __put_unaligned_be64(u64 val, u8 *p)
{
__put_unaligned_be32(val >> 32, p);
__put_unaligned_be32(val, p + 4);
}
static inline u16 get_unaligned_be16(const void *p)
{
return __get_unaligned_be16(p);
}
static inline u32 get_unaligned_be32(const void *p)
{
return __get_unaligned_be32(p);
}
static inline u64 get_unaligned_be64(const void *p)
{
return __get_unaligned_be64(p);
}
static inline void put_unaligned_be16(u16 val, void *p)
{
__put_unaligned_be16(val, p);
}
static inline void put_unaligned_be32(u32 val, void *p)
{
__put_unaligned_be32(val, p);
}
static inline void put_unaligned_be64(u64 val, void *p)
{
__put_unaligned_be64(val, p);
}
#endif /* _LINUX_UNALIGNED_BE_BYTESHIFT_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_UNALIGNED_BE_MEMMOVE_H
#define _LINUX_UNALIGNED_BE_MEMMOVE_H
#include <linux/unaligned/memmove.h>
static inline u16 get_unaligned_be16(const void *p)
{
return __get_unaligned_memmove16((const u8 *)p);
}
static inline u32 get_unaligned_be32(const void *p)
{
return __get_unaligned_memmove32((const u8 *)p);
}
static inline u64 get_unaligned_be64(const void *p)
{
return __get_unaligned_memmove64((const u8 *)p);
}
static inline void put_unaligned_be16(u16 val, void *p)
{
__put_unaligned_memmove16(val, p);
}
static inline void put_unaligned_be32(u32 val, void *p)
{
__put_unaligned_memmove32(val, p);
}
static inline void put_unaligned_be64(u64 val, void *p)
{
__put_unaligned_memmove64(val, p);
}
#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_UNALIGNED_BE_STRUCT_H
#define _LINUX_UNALIGNED_BE_STRUCT_H
#include <linux/unaligned/packed_struct.h>
static inline u16 get_unaligned_be16(const void *p)
{
return __get_unaligned_cpu16((const u8 *)p);
}
static inline u32 get_unaligned_be32(const void *p)
{
return __get_unaligned_cpu32((const u8 *)p);
}
static inline u64 get_unaligned_be64(const void *p)
{
return __get_unaligned_cpu64((const u8 *)p);
}
static inline void put_unaligned_be16(u16 val, void *p)
{
__put_unaligned_cpu16(val, p);
}
static inline void put_unaligned_be32(u32 val, void *p)
{
__put_unaligned_cpu32(val, p);
}
static inline void put_unaligned_be64(u64 val, void *p)
{
__put_unaligned_cpu64(val, p);
}
#endif /* _LINUX_UNALIGNED_BE_STRUCT_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_UNALIGNED_GENERIC_H
#define _LINUX_UNALIGNED_GENERIC_H
#include <linux/types.h>
/*
* Cause a link-time error if we try an unaligned access other than
* 1,2,4 or 8 bytes long
*/
extern void __bad_unaligned_access_size(void);
#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({ \
__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \
__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \
__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \
__bad_unaligned_access_size())))); \
}))
#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({ \
__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \
__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \
__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \
__bad_unaligned_access_size())))); \
}))
#define __put_unaligned_le(val, ptr) ({ \
void *__gu_p = (ptr); \
switch (sizeof(*(ptr))) { \
case 1: \
*(u8 *)__gu_p = (__force u8)(val); \
break; \
case 2: \
put_unaligned_le16((__force u16)(val), __gu_p); \
break; \
case 4: \
put_unaligned_le32((__force u32)(val), __gu_p); \
break; \
case 8: \
put_unaligned_le64((__force u64)(val), __gu_p); \
break; \
default: \
__bad_unaligned_access_size(); \
break; \
} \
(void)0; })
#define __put_unaligned_be(val, ptr) ({ \
void *__gu_p = (ptr); \
switch (sizeof(*(ptr))) { \
case 1: \
*(u8 *)__gu_p = (__force u8)(val); \
break; \
case 2: \
put_unaligned_be16((__force u16)(val), __gu_p); \
break; \
case 4: \
put_unaligned_be32((__force u32)(val), __gu_p); \
break; \
case 8: \
put_unaligned_be64((__force u64)(val), __gu_p); \
break; \
default: \
__bad_unaligned_access_size(); \
break; \
} \
(void)0; })
static inline u32 __get_unaligned_be24(const u8 *p)
{
return p[0] << 16 | p[1] << 8 | p[2];
}
static inline u32 get_unaligned_be24(const void *p)
{
return __get_unaligned_be24(p);
}
static inline u32 __get_unaligned_le24(const u8 *p)
{
return p[0] | p[1] << 8 | p[2] << 16;
}
static inline u32 get_unaligned_le24(const void *p)
{
return __get_unaligned_le24(p);
}
static inline void __put_unaligned_be24(const u32 val, u8 *p)
{
*p++ = val >> 16;
*p++ = val >> 8;
*p++ = val;
}
static inline void put_unaligned_be24(const u32 val, void *p)
{
__put_unaligned_be24(val, p);
}
static inline void __put_unaligned_le24(const u32 val, u8 *p)
{
*p++ = val;
*p++ = val >> 8;
*p++ = val >> 16;
}
static inline void put_unaligned_le24(const u32 val, void *p)
{
__put_unaligned_le24(val, p);
}
#endif /* _LINUX_UNALIGNED_GENERIC_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H
#define _LINUX_UNALIGNED_LE_BYTESHIFT_H
#include <linux/types.h>
static inline u16 __get_unaligned_le16(const u8 *p)
{
return p[0] | p[1] << 8;
}
static inline u32 __get_unaligned_le32(const u8 *p)
{
return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
}
static inline u64 __get_unaligned_le64(const u8 *p)
{
return (u64)__get_unaligned_le32(p + 4) << 32 |
__get_unaligned_le32(p);
}
static inline void __put_unaligned_le16(u16 val, u8 *p)
{
*p++ = val;
*p++ = val >> 8;
}
static inline void __put_unaligned_le32(u32 val, u8 *p)
{
__put_unaligned_le16(val >> 16, p + 2);
__put_unaligned_le16(val, p);
}
static inline void __put_unaligned_le64(u64 val, u8 *p)
{
__put_unaligned_le32(val >> 32, p + 4);
__put_unaligned_le32(val, p);
}
static inline u16 get_unaligned_le16(const void *p)
{
return __get_unaligned_le16(p);
}
static inline u32 get_unaligned_le32(const void *p)
{
return __get_unaligned_le32(p);
}
static inline u64 get_unaligned_le64(const void *p)
{
return __get_unaligned_le64(p);
}
static inline void put_unaligned_le16(u16 val, void *p)
{
__put_unaligned_le16(val, p);
}
static inline void put_unaligned_le32(u32 val, void *p)
{
__put_unaligned_le32(val, p);
}
static inline void put_unaligned_le64(u64 val, void *p)
{
__put_unaligned_le64(val, p);
}
#endif /* _LINUX_UNALIGNED_LE_BYTESHIFT_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_UNALIGNED_LE_MEMMOVE_H
#define _LINUX_UNALIGNED_LE_MEMMOVE_H
#include <linux/unaligned/memmove.h>
static inline u16 get_unaligned_le16(const void *p)
{
return __get_unaligned_memmove16((const u8 *)p);
}
static inline u32 get_unaligned_le32(const void *p)
{
return __get_unaligned_memmove32((const u8 *)p);
}
static inline u64 get_unaligned_le64(const void *p)
{
return __get_unaligned_memmove64((const u8 *)p);
}
static inline void put_unaligned_le16(u16 val, void *p)
{
__put_unaligned_memmove16(val, p);
}
static inline void put_unaligned_le32(u32 val, void *p)
{
__put_unaligned_memmove32(val, p);
}
static inline void put_unaligned_le64(u64 val, void *p)
{
__put_unaligned_memmove64(val, p);
}
#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_UNALIGNED_LE_STRUCT_H
#define _LINUX_UNALIGNED_LE_STRUCT_H
#include <linux/unaligned/packed_struct.h>
static inline u16 get_unaligned_le16(const void *p)
{
return __get_unaligned_cpu16((const u8 *)p);
}
static inline u32 get_unaligned_le32(const void *p)
{
return __get_unaligned_cpu32((const u8 *)p);
}
static inline u64 get_unaligned_le64(const void *p)
{
return __get_unaligned_cpu64((const u8 *)p);
}
static inline void put_unaligned_le16(u16 val, void *p)
{
__put_unaligned_cpu16(val, p);
}
static inline void put_unaligned_le32(u32 val, void *p)
{
__put_unaligned_cpu32(val, p);
}
static inline void put_unaligned_le64(u64 val, void *p)
{
__put_unaligned_cpu64(val, p);
}
#endif /* _LINUX_UNALIGNED_LE_STRUCT_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_UNALIGNED_MEMMOVE_H
#define _LINUX_UNALIGNED_MEMMOVE_H
#include <linux/kernel.h>
#include <linux/string.h>
/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
static inline u16 __get_unaligned_memmove16(const void *p)
{
u16 tmp;
memmove(&tmp, p, 2);
return tmp;
}
static inline u32 __get_unaligned_memmove32(const void *p)
{
u32 tmp;
memmove(&tmp, p, 4);
return tmp;
}
static inline u64 __get_unaligned_memmove64(const void *p)
{
u64 tmp;
memmove(&tmp, p, 8);
return tmp;
}
static inline void __put_unaligned_memmove16(u16 val, void *p)
{
memmove(p, &val, 2);
}
static inline void __put_unaligned_memmove32(u32 val, void *p)
{
memmove(p, &val, 4);
}
static inline void __put_unaligned_memmove64(u64 val, void *p)
{
memmove(p, &val, 8);
}
#endif /* _LINUX_UNALIGNED_MEMMOVE_H */
@@ -430,7 +430,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
ip6h = ipv6_hdr(skb);
/* ip6h->version = 6; ip6h->priority = 0; */
put_unaligned(0x60, (unsigned char *)ip6h);
*(unsigned char *)ip6h = 0x60;
ip6h->flow_lbl[0] = 0;
ip6h->flow_lbl[1] = 0;
ip6h->flow_lbl[2] = 0;
@@ -458,7 +458,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
iph = ip_hdr(skb);
/* iph->version = 4; iph->ihl = 5; */
put_unaligned(0x45, (unsigned char *)iph);
*(unsigned char *)iph = 0x45;
iph->tos = 0;
put_unaligned(htons(ip_len), &(iph->tot_len));
iph->id = htons(atomic_inc_return(&ip_ident));
@@ -304,7 +304,7 @@ static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
if (!inbounds(e, sizeof(u8)))
goto fail;
if (data)
*data = get_unaligned((u8 *)e->pos);
*data = *((u8 *)e->pos);
e->pos += sizeof(u8);
return true;
}