Commit be17a432 authored by Anton Altaparmakov

NTFS: Begin of sparse annotations: new data types and endianness conversion.

- Add le{16,32,64} as well as sle{16,32,64} data types to fs/ntfs/types.h.
- Change ntfschar to be le16 instead of u16 in fs/ntfs/types.h.
- Add le versions of VCN, LCN, and LSN called leVCN, leLCN, and leLSN,
  respectively, to fs/ntfs/types.h.
- Update endianness conversion macros in fs/ntfs/endian.h to use the
  new types as appropriate.
- Do proper type casting when using sle64_to_cpup() in fs/ntfs/dir.c
  and index.c.
Signed-off-by: Anton Altaparmakov <aia21@cantab.net>
parent d3762ba0
...@@ -34,6 +34,15 @@ ToDo/Notes: ...@@ -34,6 +34,15 @@ ToDo/Notes:
- Update ->truncate (fs/ntfs/inode.c::ntfs_truncate()) to check if the - Update ->truncate (fs/ntfs/inode.c::ntfs_truncate()) to check if the
inode size has changed and to only output an error if so. inode size has changed and to only output an error if so.
- Rename fs/ntfs/attrib.h::attribute_value_length() to ntfs_attr_size(). - Rename fs/ntfs/attrib.h::attribute_value_length() to ntfs_attr_size().
- Add le{16,32,64} as well as sle{16,32,64} data types to
fs/ntfs/types.h.
- Change ntfschar to be le16 instead of u16 in fs/ntfs/types.h.
- Add le versions of VCN, LCN, and LSN called leVCN, leLCN, and leLSN,
respectively, to fs/ntfs/types.h.
- Update endianness conversion macros in fs/ntfs/endian.h to use the
new types as appropriate.
- Do proper type casting when using sle64_to_cpup() in fs/ntfs/dir.c
and index.c.
2.1.18 - Fix scheduling latencies at mount time as well as an endianness bug. 2.1.18 - Fix scheduling latencies at mount time as well as an endianness bug.
......
...@@ -299,7 +299,7 @@ MFT_REF ntfs_lookup_inode_by_name(ntfs_inode *dir_ni, const ntfschar *uname, ...@@ -299,7 +299,7 @@ MFT_REF ntfs_lookup_inode_by_name(ntfs_inode *dir_ni, const ntfschar *uname,
goto err_out; goto err_out;
} }
/* Get the starting vcn of the index_block holding the child node. */ /* Get the starting vcn of the index_block holding the child node. */
vcn = sle64_to_cpup((u8*)ie + le16_to_cpu(ie->length) - 8); vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));
ia_mapping = VFS_I(dir_ni)->i_mapping; ia_mapping = VFS_I(dir_ni)->i_mapping;
/* /*
* We are done with the index root and the mft record. Release them, * We are done with the index root and the mft record. Release them,
...@@ -551,7 +551,8 @@ MFT_REF ntfs_lookup_inode_by_name(ntfs_inode *dir_ni, const ntfschar *uname, ...@@ -551,7 +551,8 @@ MFT_REF ntfs_lookup_inode_by_name(ntfs_inode *dir_ni, const ntfschar *uname,
} }
/* Child node present, descend into it. */ /* Child node present, descend into it. */
old_vcn = vcn; old_vcn = vcn;
vcn = sle64_to_cpup((u8*)ie + le16_to_cpu(ie->length) - 8); vcn = sle64_to_cpup((sle64*)((u8*)ie +
le16_to_cpu(ie->length) - 8));
if (vcn >= 0) { if (vcn >= 0) {
/* If vcn is in the same page cache page as old_vcn we /* If vcn is in the same page cache page as old_vcn we
* recycle the mapped page. */ * recycle the mapped page. */
......
...@@ -24,24 +24,70 @@ ...@@ -24,24 +24,70 @@
#define _LINUX_NTFS_ENDIAN_H #define _LINUX_NTFS_ENDIAN_H
#include <asm/byteorder.h> #include <asm/byteorder.h>
#include "types.h"
/* /*
* Signed endianness conversion defines. * Signed endianness conversion functions.
*/ */
#define sle16_to_cpu(x) ((s16)__le16_to_cpu((s16)(x)))
#define sle32_to_cpu(x) ((s32)__le32_to_cpu((s32)(x)))
#define sle64_to_cpu(x) ((s64)__le64_to_cpu((s64)(x)))
#define sle16_to_cpup(x) ((s16)__le16_to_cpu(*(s16*)(x))) static inline s16 sle16_to_cpu(sle16 x)
#define sle32_to_cpup(x) ((s32)__le32_to_cpu(*(s32*)(x))) {
#define sle64_to_cpup(x) ((s64)__le64_to_cpu(*(s64*)(x))) return le16_to_cpu((__force le16)x);
}
#define cpu_to_sle16(x) ((s16)__cpu_to_le16((s16)(x))) static inline s32 sle32_to_cpu(sle32 x)
#define cpu_to_sle32(x) ((s32)__cpu_to_le32((s32)(x))) {
#define cpu_to_sle64(x) ((s64)__cpu_to_le64((s64)(x))) return le32_to_cpu((__force le32)x);
}
#define cpu_to_sle16p(x) ((s16)__cpu_to_le16(*(s16*)(x))) static inline s64 sle64_to_cpu(sle64 x)
#define cpu_to_sle32p(x) ((s32)__cpu_to_le32(*(s32*)(x))) {
#define cpu_to_sle64p(x) ((s64)__cpu_to_le64(*(s64*)(x))) return le64_to_cpu((__force le64)x);
}
static inline s16 sle16_to_cpup(sle16 *x)
{
return le16_to_cpu(*(__force le16*)x);
}
static inline s32 sle32_to_cpup(sle32 *x)
{
return le32_to_cpu(*(__force le32*)x);
}
static inline s64 sle64_to_cpup(sle64 *x)
{
return le64_to_cpu(*(__force le64*)x);
}
static inline sle16 cpu_to_sle16(s16 x)
{
return (__force sle16)cpu_to_le16(x);
}
static inline sle32 cpu_to_sle32(s32 x)
{
return (__force sle32)cpu_to_le32(x);
}
static inline sle64 cpu_to_sle64(s64 x)
{
return (__force sle64)cpu_to_le64(x);
}
static inline sle16 cpu_to_sle16p(s16 *x)
{
return (__force sle16)cpu_to_le16(*x);
}
static inline sle32 cpu_to_sle32p(s32 *x)
{
return (__force sle32)cpu_to_le32(*x);
}
static inline sle64 cpu_to_sle64p(s64 *x)
{
return (__force sle64)cpu_to_le64(*x);
}
#endif /* _LINUX_NTFS_ENDIAN_H */ #endif /* _LINUX_NTFS_ENDIAN_H */
...@@ -265,7 +265,7 @@ int ntfs_index_lookup(const void *key, const int key_len, ...@@ -265,7 +265,7 @@ int ntfs_index_lookup(const void *key, const int key_len,
goto err_out; goto err_out;
} }
/* Get the starting vcn of the index_block holding the child node. */ /* Get the starting vcn of the index_block holding the child node. */
vcn = sle64_to_cpup((u8*)ie + le16_to_cpu(ie->length) - 8); vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));
ia_mapping = VFS_I(idx_ni)->i_mapping; ia_mapping = VFS_I(idx_ni)->i_mapping;
/* /*
* We are done with the index root and the mft record. Release them, * We are done with the index root and the mft record. Release them,
...@@ -427,7 +427,7 @@ int ntfs_index_lookup(const void *key, const int key_len, ...@@ -427,7 +427,7 @@ int ntfs_index_lookup(const void *key, const int key_len,
} }
/* Child node present, descend into it. */ /* Child node present, descend into it. */
old_vcn = vcn; old_vcn = vcn;
vcn = sle64_to_cpup((u8*)ie + le16_to_cpu(ie->length) - 8); vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));
if (vcn >= 0) { if (vcn >= 0) {
/* /*
* If vcn is in the same page cache page as old_vcn we recycle * If vcn is in the same page cache page as old_vcn we recycle
......
...@@ -23,8 +23,17 @@ ...@@ -23,8 +23,17 @@
#ifndef _LINUX_NTFS_TYPES_H #ifndef _LINUX_NTFS_TYPES_H
#define _LINUX_NTFS_TYPES_H #define _LINUX_NTFS_TYPES_H
#include <linux/types.h>
typedef __le16 le16;
typedef __le32 le32;
typedef __le64 le64;
typedef __u16 __bitwise sle16;
typedef __u32 __bitwise sle32;
typedef __u64 __bitwise sle64;
/* 2-byte Unicode character type. */ /* 2-byte Unicode character type. */
typedef u16 ntfschar; typedef le16 ntfschar;
#define UCHAR_T_SIZE_BITS 1 #define UCHAR_T_SIZE_BITS 1
/* /*
...@@ -32,7 +41,9 @@ typedef u16 ntfschar; ...@@ -32,7 +41,9 @@ typedef u16 ntfschar;
* and VCN, to allow for type checking and better code readability. * and VCN, to allow for type checking and better code readability.
*/ */
typedef s64 VCN; typedef s64 VCN;
typedef sle64 leVCN;
typedef s64 LCN; typedef s64 LCN;
typedef sle64 leLCN;
/* /*
* The NTFS journal $LogFile uses log sequence numbers which are signed 64-bit * The NTFS journal $LogFile uses log sequence numbers which are signed 64-bit
...@@ -40,6 +51,7 @@ typedef s64 LCN; ...@@ -40,6 +51,7 @@ typedef s64 LCN;
* code readability. * code readability.
*/ */
typedef s64 LSN; typedef s64 LSN;
typedef sle64 leLSN;
/** /**
* runlist_element - in memory vcn to lcn mapping array element * runlist_element - in memory vcn to lcn mapping array element
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment