Commit 5bf431fa authored by Linus Torvalds

Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs:
  [XFS] Update maintainers
  [XFS] use scalable vmap API
  [XFS] remove old vmap cache
  [XFS] make xfs_ino_t an unsigned long long
  [XFS] truncate readdir offsets to signed 32 bit values
  [XFS] fix compile of xfs_btree_readahead_lblock on m68k
  [XFS] Remove macro-to-function indirections in the mask code
  [XFS] Remove macro-to-function indirections in attr code
  [XFS] Remove several unused typedefs.
  [XFS] pass XFS_IGET_BULKSTAT to xfs_iget for handle operations
parents c2919f2a cb7a97d0
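The biggest functional change in this pull is the buffer-mapping switch: the hand-rolled vmap()/vunmap() batching cache in xfs_buf.c is deleted and the buffer code moves to the scalable vm_map_ram()/vm_unmap_ram() API. The sketch below is illustrative only and is not part of the merge; demo_buf, demo_map and demo_unmap are made-up stand-ins for the real xfs_buf_t code, shown against the 2.6.29-era four-argument vm_map_ram() prototype.

/* Illustrative sketch only -- not from the merge. */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

struct demo_buf {                       /* stand-in for xfs_buf_t */
        struct page     **pages;
        unsigned int    page_count;
        void            *addr;
};

static int demo_map(struct demo_buf *bp)
{
        /* Old scheme: vmap() now, queue the address for a batched vunmap() later. */
        /* bp->addr = vmap(bp->pages, bp->page_count, VM_MAP, PAGE_KERNEL); */

        /* New scheme: per-CPU vmap blocks; -1 means no NUMA node preference. */
        bp->addr = vm_map_ram(bp->pages, bp->page_count, -1, PAGE_KERNEL);
        return bp->addr ? 0 : -ENOMEM;
}

static void demo_unmap(struct demo_buf *bp)
{
        /* Unlike vunmap(), the unmap side must be told how many pages were mapped,
         * which is why xfs_buf_free() below now passes bp->b_page_count. */
        vm_unmap_ram(bp->addr, bp->page_count);
}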
@@ -4842,11 +4842,11 @@ S: Supported
 XFS FILESYSTEM
 P: Silicon Graphics Inc
-P: Tim Shimmin
+P: Felix Blyakher
 M: xfs-masters@oss.sgi.com
 L: xfs@oss.sgi.com
 W: http://oss.sgi.com/projects/xfs
-T: git git://oss.sgi.com:8090/xfs/xfs-2.6.git
+T: git://oss.sgi.com/xfs/xfs.git
 S: Supported
 XILINX SYSTEMACE DRIVER
...
@@ -21,8 +21,6 @@
 extern struct workqueue_struct *xfsdatad_workqueue;
 extern mempool_t *xfs_ioend_pool;
-typedef void (*xfs_ioend_func_t)(void *);
 /*
  * xfs_ioend struct manages large extent writes for XFS.
  * It can manage several multi-page bio's at once.
...
@@ -165,75 +165,6 @@ test_page_region(
 	return (mask && (page_private(page) & mask) == mask);
 }
-/*
- *	Mapping of multi-page buffers into contiguous virtual space
- */
-typedef struct a_list {
-	void		*vm_addr;
-	struct a_list	*next;
-} a_list_t;
-
-static a_list_t		*as_free_head;
-static int		as_list_len;
-static DEFINE_SPINLOCK(as_lock);
-
-/*
- *	Try to batch vunmaps because they are costly.
- */
-STATIC void
-free_address(
-	void		*addr)
-{
-	a_list_t	*aentry;
-
-#ifdef CONFIG_XEN
-	/*
-	 * Xen needs to be able to make sure it can get an exclusive
-	 * RO mapping of pages it wants to turn into a pagetable.  If
-	 * a newly allocated page is also still being vmap()ed by xfs,
-	 * it will cause pagetable construction to fail.  This is a
-	 * quick workaround to always eagerly unmap pages so that Xen
-	 * is happy.
-	 */
-	vunmap(addr);
-	return;
-#endif
-
-	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
-	if (likely(aentry)) {
-		spin_lock(&as_lock);
-		aentry->next = as_free_head;
-		aentry->vm_addr = addr;
-		as_free_head = aentry;
-		as_list_len++;
-		spin_unlock(&as_lock);
-	} else {
-		vunmap(addr);
-	}
-}
-
-STATIC void
-purge_addresses(void)
-{
-	a_list_t	*aentry, *old;
-
-	if (as_free_head == NULL)
-		return;
-
-	spin_lock(&as_lock);
-	aentry = as_free_head;
-	as_free_head = NULL;
-	as_list_len = 0;
-	spin_unlock(&as_lock);
-
-	while ((old = aentry) != NULL) {
-		vunmap(aentry->vm_addr);
-		aentry = aentry->next;
-		kfree(old);
-	}
-}
 /*
  *	Internal xfs_buf_t object manipulation
  */
@@ -333,7 +264,7 @@ xfs_buf_free(
 	uint		i;
 	if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
-		free_address(bp->b_addr - bp->b_offset);
+		vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count);
 	for (i = 0; i < bp->b_page_count; i++) {
 		struct page	*page = bp->b_pages[i];
@@ -455,10 +386,8 @@ _xfs_buf_map_pages(
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
-		if (as_list_len > 64)
-			purge_addresses();
-		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
-					VM_MAP, PAGE_KERNEL);
+		bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
+					-1, PAGE_KERNEL);
 		if (unlikely(bp->b_addr == NULL))
 			return -ENOMEM;
 		bp->b_addr += bp->b_offset;
@@ -1743,8 +1672,6 @@ xfsbufd(
 			count++;
 		}
-		if (as_list_len > 0)
-			purge_addresses();
 		if (count)
 			blk_run_address_space(target->bt_mapping);
...
@@ -126,11 +126,26 @@ xfs_nfs_get_inode(
 	if (ino == 0)
 		return ERR_PTR(-ESTALE);
-	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
-	if (error)
+	/*
+	 * The XFS_IGET_BULKSTAT means that an invalid inode number is just
+	 * fine and not an indication of a corrupted filesystem.  Because
+	 * clients can send any kind of invalid file handle, e.g. after
+	 * a restore on the server we have to deal with this case gracefully.
+	 */
+	error = xfs_iget(mp, NULL, ino, XFS_IGET_BULKSTAT,
+			 XFS_ILOCK_SHARED, &ip, 0);
+	if (error) {
+		/*
+		 * EINVAL means the inode cluster doesn't exist anymore.
+		 * This implies the filehandle is stale, so we should
+		 * translate it here.
+		 * We don't use ESTALE directly down the chain to not
+		 * confuse applications using bulkstat that expect EINVAL.
+		 */
+		if (error == EINVAL)
+			error = ESTALE;
 		return ERR_PTR(-error);
-	if (!ip)
-		return ERR_PTR(-EIO);
+	}
 	if (ip->i_d.di_gen != generation) {
 		xfs_iput_new(ip, XFS_ILOCK_SHARED);
...
@@ -22,7 +22,6 @@
  * Access Control Lists
  */
 typedef __uint16_t	xfs_acl_perm_t;
-typedef __int32_t	xfs_acl_type_t;
 typedef __int32_t	xfs_acl_tag_t;
 typedef __int32_t	xfs_acl_id_t;
...
@@ -231,7 +231,7 @@ typedef struct xfs_perag
 #define	XFS_FSB_TO_AGNO(mp,fsbno)	\
 	((xfs_agnumber_t)((fsbno) >> (mp)->m_sb.sb_agblklog))
 #define	XFS_FSB_TO_AGBNO(mp,fsbno)	\
-	((xfs_agblock_t)((fsbno) & XFS_MASK32LO((mp)->m_sb.sb_agblklog)))
+	((xfs_agblock_t)((fsbno) & xfs_mask32lo((mp)->m_sb.sb_agblklog)))
 #define	XFS_AGB_TO_DADDR(mp,agno,agbno)	\
 	((xfs_daddr_t)XFS_FSB_TO_BB(mp, \
 		(xfs_fsblock_t)(agno) * (mp)->m_sb.sb_agblocks + (agbno)))
...
This diff is collapsed.
@@ -151,8 +151,6 @@ typedef struct xfs_attr_leafblock {
 /*
  * Cast typed pointers for "local" and "remote" name/value structs.
  */
-#define XFS_ATTR_LEAF_NAME_REMOTE(leafp,idx)	\
-	xfs_attr_leaf_name_remote(leafp,idx)
 static inline xfs_attr_leaf_name_remote_t *
 xfs_attr_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx)
 {
@@ -160,8 +158,6 @@ xfs_attr_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx)
 		&((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)];
 }
-#define XFS_ATTR_LEAF_NAME_LOCAL(leafp,idx)	\
-	xfs_attr_leaf_name_local(leafp,idx)
 static inline xfs_attr_leaf_name_local_t *
 xfs_attr_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)
 {
@@ -169,8 +165,6 @@ xfs_attr_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)
 		&((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)];
 }
-#define XFS_ATTR_LEAF_NAME(leafp,idx)	\
-	xfs_attr_leaf_name(leafp,idx)
 static inline char *xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx)
 {
 	return &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)];
@@ -181,24 +175,18 @@ static inline char *xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx)
  * a "local" name/value structure, a "remote" name/value structure, and
  * a pointer which might be either.
  */
-#define XFS_ATTR_LEAF_ENTSIZE_REMOTE(nlen)	\
-	xfs_attr_leaf_entsize_remote(nlen)
 static inline int xfs_attr_leaf_entsize_remote(int nlen)
 {
 	return ((uint)sizeof(xfs_attr_leaf_name_remote_t) - 1 + (nlen) + \
 		XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
 }
-#define XFS_ATTR_LEAF_ENTSIZE_LOCAL(nlen,vlen)	\
-	xfs_attr_leaf_entsize_local(nlen,vlen)
 static inline int xfs_attr_leaf_entsize_local(int nlen, int vlen)
 {
 	return ((uint)sizeof(xfs_attr_leaf_name_local_t) - 1 + (nlen) + (vlen) +
 		XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
 }
-#define XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX(bsize)	\
-	xfs_attr_leaf_entsize_local_max(bsize)
 static inline int xfs_attr_leaf_entsize_local_max(int bsize)
 {
 	return (((bsize) >> 1) + ((bsize) >> 2));
...
@@ -23,24 +23,16 @@
  */
 /*
- * masks with n high/low bits set, 32-bit values & 64-bit values
+ * masks with n high/low bits set, 64-bit values
  */
-#define	XFS_MASK32HI(n)		xfs_mask32hi(n)
-static inline __uint32_t xfs_mask32hi(int n)
-{
-	return (__uint32_t)-1 << (32 - (n));
-}
-#define	XFS_MASK64HI(n)		xfs_mask64hi(n)
 static inline __uint64_t xfs_mask64hi(int n)
 {
 	return (__uint64_t)-1 << (64 - (n));
 }
-#define	XFS_MASK32LO(n)		xfs_mask32lo(n)
 static inline __uint32_t xfs_mask32lo(int n)
 {
 	return ((__uint32_t)1 << (n)) - 1;
 }
-#define	XFS_MASK64LO(n)		xfs_mask64lo(n)
 static inline __uint64_t xfs_mask64lo(int n)
 {
 	return ((__uint64_t)1 << (n)) - 1;
...
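The helpers that survive in xfs_bit.h are plain bit arithmetic, so the switch from the XFS_MASK64LO()/XFS_MASK64HI() macros to the lower-case inlines in the xfs_bmap_btree.c hunks below is purely mechanical. As a standalone userspace restatement (stdint types instead of the kernel's __uint64_t, illustrative only), here is what two of the values those hunks rely on evaluate to:

/* Illustrative restatement of the xfs_bit.h mask helpers -- not kernel code. */
#include <assert.h>
#include <stdint.h>

static inline uint64_t mask64lo(int n) { return ((uint64_t)1 << n) - 1; }   /* low n bits set */
static inline uint64_t mask64hi(int n) { return (uint64_t)-1 << (64 - n); } /* high n bits set */

int main(void)
{
        assert(mask64lo(21) == 0x1fffffULL);               /* the 21-bit extent blockcount field */
        assert(mask64hi(11) == 0xffe0000000000000ULL);     /* the top 11 bits of a packed record word */
        return 0;
}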
@@ -110,16 +110,16 @@ __xfs_bmbt_get_all(
 	ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN));
 	s->br_startoff = ((xfs_fileoff_t)l0 &
-			   XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
+			   xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
 #if XFS_BIG_BLKNOS
-	s->br_startblock = (((xfs_fsblock_t)l0 & XFS_MASK64LO(9)) << 43) |
+	s->br_startblock = (((xfs_fsblock_t)l0 & xfs_mask64lo(9)) << 43) |
 			   (((xfs_fsblock_t)l1) >> 21);
 #else
 #ifdef DEBUG
 	{
 		xfs_dfsbno_t	b;
-		b = (((xfs_dfsbno_t)l0 & XFS_MASK64LO(9)) << 43) |
+		b = (((xfs_dfsbno_t)l0 & xfs_mask64lo(9)) << 43) |
 		    (((xfs_dfsbno_t)l1) >> 21);
 		ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
 		s->br_startblock = (xfs_fsblock_t)b;
@@ -128,7 +128,7 @@ __xfs_bmbt_get_all(
 	s->br_startblock = (xfs_fsblock_t)(((xfs_dfsbno_t)l1) >> 21);
 #endif	/* DEBUG */
 #endif	/* XFS_BIG_BLKNOS */
-	s->br_blockcount = (xfs_filblks_t)(l1 & XFS_MASK64LO(21));
+	s->br_blockcount = (xfs_filblks_t)(l1 & xfs_mask64lo(21));
 	/* This is xfs_extent_state() in-line */
 	if (ext_flag) {
 		ASSERT(s->br_blockcount != 0);	/* saved for DMIG */
@@ -153,7 +153,7 @@ xfs_filblks_t
 xfs_bmbt_get_blockcount(
 	xfs_bmbt_rec_host_t	*r)
 {
-	return (xfs_filblks_t)(r->l1 & XFS_MASK64LO(21));
+	return (xfs_filblks_t)(r->l1 & xfs_mask64lo(21));
 }
 /*
@@ -164,13 +164,13 @@ xfs_bmbt_get_startblock(
 	xfs_bmbt_rec_host_t	*r)
 {
 #if XFS_BIG_BLKNOS
-	return (((xfs_fsblock_t)r->l0 & XFS_MASK64LO(9)) << 43) |
+	return (((xfs_fsblock_t)r->l0 & xfs_mask64lo(9)) << 43) |
 	       (((xfs_fsblock_t)r->l1) >> 21);
 #else
 #ifdef DEBUG
 	xfs_dfsbno_t	b;
-	b = (((xfs_dfsbno_t)r->l0 & XFS_MASK64LO(9)) << 43) |
+	b = (((xfs_dfsbno_t)r->l0 & xfs_mask64lo(9)) << 43) |
 	    (((xfs_dfsbno_t)r->l1) >> 21);
 	ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
 	return (xfs_fsblock_t)b;
@@ -188,7 +188,7 @@ xfs_bmbt_get_startoff(
 	xfs_bmbt_rec_host_t	*r)
 {
 	return ((xfs_fileoff_t)r->l0 &
-		 XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
+		 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
 }
 xfs_exntst_t
@@ -219,7 +219,7 @@ xfs_filblks_t
 xfs_bmbt_disk_get_blockcount(
 	xfs_bmbt_rec_t	*r)
 {
-	return (xfs_filblks_t)(be64_to_cpu(r->l1) & XFS_MASK64LO(21));
+	return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21));
 }
 /*
@@ -230,7 +230,7 @@ xfs_bmbt_disk_get_startoff(
 	xfs_bmbt_rec_t	*r)
 {
 	return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
-		 XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
+		 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
 }
@@ -248,33 +248,33 @@ xfs_bmbt_set_allf(
 	int		extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
 	ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
-	ASSERT((startoff & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0);
-	ASSERT((blockcount & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
+	ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0);
+	ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
 #if XFS_BIG_BLKNOS
-	ASSERT((startblock & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0);
+	ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0);
 	r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
 		((xfs_bmbt_rec_base_t)startoff << 9) |
 		((xfs_bmbt_rec_base_t)startblock >> 43);
 	r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
 		((xfs_bmbt_rec_base_t)blockcount &
-		(xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+		(xfs_bmbt_rec_base_t)xfs_mask64lo(21));
 #else	/* !XFS_BIG_BLKNOS */
 	if (ISNULLSTARTBLOCK(startblock)) {
 		r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
 			((xfs_bmbt_rec_base_t)startoff << 9) |
-			(xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
-		r->l1 = XFS_MASK64HI(11) |
+			(xfs_bmbt_rec_base_t)xfs_mask64lo(9);
+		r->l1 = xfs_mask64hi(11) |
 			((xfs_bmbt_rec_base_t)startblock << 21) |
 			((xfs_bmbt_rec_base_t)blockcount &
-			(xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+			(xfs_bmbt_rec_base_t)xfs_mask64lo(21));
 	} else {
 		r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
 			((xfs_bmbt_rec_base_t)startoff << 9);
 		r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
 			((xfs_bmbt_rec_base_t)blockcount &
-			(xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+			(xfs_bmbt_rec_base_t)xfs_mask64lo(21));
 	}
 #endif	/* XFS_BIG_BLKNOS */
 }
@@ -306,11 +306,11 @@ xfs_bmbt_disk_set_allf(
 	int			extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
 	ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
-	ASSERT((startoff & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0);
-	ASSERT((blockcount & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
+	ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0);
+	ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
 #if XFS_BIG_BLKNOS
-	ASSERT((startblock & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0);
+	ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0);
 	r->l0 = cpu_to_be64(
 		((xfs_bmbt_rec_base_t)extent_flag << 63) |
@@ -319,17 +319,17 @@ xfs_bmbt_disk_set_allf(
 	r->l1 = cpu_to_be64(
 		((xfs_bmbt_rec_base_t)startblock << 21) |
 		((xfs_bmbt_rec_base_t)blockcount &
-		 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
+		 (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
 #else	/* !XFS_BIG_BLKNOS */
 	if (ISNULLSTARTBLOCK(startblock)) {
 		r->l0 = cpu_to_be64(
 			((xfs_bmbt_rec_base_t)extent_flag << 63) |
 			((xfs_bmbt_rec_base_t)startoff << 9) |
-			(xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
-		r->l1 = cpu_to_be64(XFS_MASK64HI(11) |
+			(xfs_bmbt_rec_base_t)xfs_mask64lo(9));
+		r->l1 = cpu_to_be64(xfs_mask64hi(11) |
 			((xfs_bmbt_rec_base_t)startblock << 21) |
 			((xfs_bmbt_rec_base_t)blockcount &
-			 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
+			 (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
 	} else {
 		r->l0 = cpu_to_be64(
 			((xfs_bmbt_rec_base_t)extent_flag << 63) |
@@ -337,7 +337,7 @@ xfs_bmbt_disk_set_allf(
 		r->l1 = cpu_to_be64(
 			((xfs_bmbt_rec_base_t)startblock << 21) |
 			((xfs_bmbt_rec_base_t)blockcount &
-			 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
+			 (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
 	}
 #endif	/* XFS_BIG_BLKNOS */
 }
@@ -362,9 +362,9 @@ xfs_bmbt_set_blockcount(
 	xfs_bmbt_rec_host_t	*r,
 	xfs_filblks_t		v)
 {
-	ASSERT((v & XFS_MASK64HI(43)) == 0);
-	r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(43)) |
-		  (xfs_bmbt_rec_base_t)(v & XFS_MASK64LO(21));
+	ASSERT((v & xfs_mask64hi(43)) == 0);
+	r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64hi(43)) |
+		  (xfs_bmbt_rec_base_t)(v & xfs_mask64lo(21));
 }
 /*
@@ -376,21 +376,21 @@ xfs_bmbt_set_startblock(
 	xfs_fsblock_t		v)
 {
 #if XFS_BIG_BLKNOS
-	ASSERT((v & XFS_MASK64HI(12)) == 0);
-	r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(55)) |
+	ASSERT((v & xfs_mask64hi(12)) == 0);
+	r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64hi(55)) |
 		 (xfs_bmbt_rec_base_t)(v >> 43);
-	r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)) |
+	r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)) |
 		 (xfs_bmbt_rec_base_t)(v << 21);
 #else	/* !XFS_BIG_BLKNOS */
 	if (ISNULLSTARTBLOCK(v)) {
-		r->l0 |= (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
-		r->l1 = (xfs_bmbt_rec_base_t)XFS_MASK64HI(11) |
+		r->l0 |= (xfs_bmbt_rec_base_t)xfs_mask64lo(9);
+		r->l1 = (xfs_bmbt_rec_base_t)xfs_mask64hi(11) |
 			((xfs_bmbt_rec_base_t)v << 21) |
-			(r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+			(r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
 	} else {
-		r->l0 &= ~(xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
+		r->l0 &= ~(xfs_bmbt_rec_base_t)xfs_mask64lo(9);
 		r->l1 = ((xfs_bmbt_rec_base_t)v << 21) |
-			(r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+			(r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
 	}
 #endif	/* XFS_BIG_BLKNOS */
 }
@@ -403,10 +403,10 @@ xfs_bmbt_set_startoff(
 	xfs_bmbt_rec_host_t	*r,
 	xfs_fileoff_t		v)
 {
-	ASSERT((v & XFS_MASK64HI(9)) == 0);
-	r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) XFS_MASK64HI(1)) |
+	ASSERT((v & xfs_mask64hi(9)) == 0);
+	r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) xfs_mask64hi(1)) |
 		((xfs_bmbt_rec_base_t)v << 9) |
-		  (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
+		  (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64lo(9));
 }
 /*
@@ -419,9 +419,9 @@ xfs_bmbt_set_state(
 {
 	ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN);
 	if (v == XFS_EXT_NORM)
-		r->l0 &= XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN);
+		r->l0 &= xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN);
 	else
-		r->l0 |= XFS_MASK64HI(BMBT_EXNTFLAG_BITLEN);
+		r->l0 |= xfs_mask64hi(BMBT_EXNTFLAG_BITLEN);
 }
 /*
...
@@ -730,8 +730,8 @@ xfs_btree_readahead_lblock(
 	struct xfs_btree_block	*block)
 {
 	int			rval = 0;
-	xfs_fsblock_t		left = be64_to_cpu(block->bb_u.l.bb_leftsib);
-	xfs_fsblock_t		right = be64_to_cpu(block->bb_u.l.bb_rightsib);
+	xfs_dfsbno_t		left = be64_to_cpu(block->bb_u.l.bb_leftsib);
+	xfs_dfsbno_t		right = be64_to_cpu(block->bb_u.l.bb_rightsib);
 	if ((lr & XFS_BTCUR_LEFTRA) && left != NULLDFSBNO) {
 		xfs_btree_reada_bufl(cur->bc_mp, left, 1);
...
@@ -517,9 +517,9 @@ xfs_dir2_block_getdents(
 		/*
 		 * If it didn't fit, set the final offset to here & return.
 		 */
-		if (filldir(dirent, dep->name, dep->namelen, cook,
+		if (filldir(dirent, dep->name, dep->namelen, cook & 0x7fffffff,
 			    ino, DT_UNKNOWN)) {
-			*offset = cook;
+			*offset = cook & 0x7fffffff;
 			xfs_da_brelse(NULL, bp);
 			return 0;
 		}
@@ -529,7 +529,8 @@ xfs_dir2_block_getdents(
 	 * Reached the end of the block.
 	 * Set the offset to a non-existent block 1 and return.
 	 */
-	*offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0);
+	*offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
+			0x7fffffff;
 	xfs_da_brelse(NULL, bp);
 	return 0;
 }
...
@@ -1092,7 +1092,7 @@ xfs_dir2_leaf_getdents(
 		 * Won't fit. Return to caller.
 		 */
 		if (filldir(dirent, dep->name, dep->namelen,
-			    xfs_dir2_byte_to_dataptr(mp, curoff),
+			    xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff,
 			    ino, DT_UNKNOWN))
 			break;
@@ -1108,9 +1108,9 @@ xfs_dir2_leaf_getdents(
 	 * All done. Set output offset value to current offset.
 	 */
 	if (curoff > xfs_dir2_dataptr_to_byte(mp, XFS_DIR2_MAX_DATAPTR))
-		*offset = XFS_DIR2_MAX_DATAPTR;
+		*offset = XFS_DIR2_MAX_DATAPTR & 0x7fffffff;
 	else
-		*offset = xfs_dir2_byte_to_dataptr(mp, curoff);
+		*offset = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff;
 	kmem_free(map);
 	if (bp)
 		xfs_da_brelse(NULL, bp);
...
@@ -752,8 +752,8 @@ xfs_dir2_sf_getdents(
 #if XFS_BIG_INUMS
 		ino += mp->m_inoadd;
 #endif
-		if (filldir(dirent, ".", 1, dot_offset, ino, DT_DIR)) {
-			*offset = dot_offset;
+		if (filldir(dirent, ".", 1, dot_offset & 0x7fffffff, ino, DT_DIR)) {
+			*offset = dot_offset & 0x7fffffff;
 			return 0;
 		}
 	}
@@ -766,8 +766,8 @@ xfs_dir2_sf_getdents(
 #if XFS_BIG_INUMS
 		ino += mp->m_inoadd;
 #endif
-		if (filldir(dirent, "..", 2, dotdot_offset, ino, DT_DIR)) {
-			*offset = dotdot_offset;
+		if (filldir(dirent, "..", 2, dotdot_offset & 0x7fffffff, ino, DT_DIR)) {
+			*offset = dotdot_offset & 0x7fffffff;
 			return 0;
 		}
 	}
@@ -791,14 +791,15 @@ xfs_dir2_sf_getdents(
 #endif
 		if (filldir(dirent, sfep->name, sfep->namelen,
-			    off, ino, DT_UNKNOWN)) {
-			*offset = off;
+			    off & 0x7fffffff, ino, DT_UNKNOWN)) {
+			*offset = off & 0x7fffffff;
 			return 0;
 		}
 		sfep = xfs_dir2_sf_nextentry(sfp, sfep);
 	}
-	*offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0);
+	*offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
+			0x7fffffff;
 	return 0;
 }
...
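All of the getdents hunks above apply the same & 0x7fffffff mask, per the "[XFS] truncate readdir offsets to signed 32 bit values" change in this pull: the cookie handed to filldir() and stored in *offset stays within the positive range of a signed 32-bit value. A trivial userspace illustration with a made-up cookie value (not an actual XFS dataptr):

/* Illustrative only: the effect of the 0x7fffffff mask on a directory cookie. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t cook = 0x90000020u;            /* hypothetical raw cookie with the high bit set */
        int32_t  off  = cook & 0x7fffffff;      /* what filldir()/f_pos ends up seeing */

        printf("raw=0x%08x masked=0x%08x (%d)\n", cook, (uint32_t)off, off);
        return 0;
}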
@@ -45,7 +45,7 @@ typedef __uint32_t	prid_t;		/* project ID */
 typedef __uint32_t	inst_t;		/* an instruction */
 typedef __s64		xfs_off_t;	/* <file offset> type */
-typedef __u64		xfs_ino_t;	/* <inode> type */
+typedef unsigned long long xfs_ino_t;	/* <inode> type */
 typedef __s64		xfs_daddr_t;	/* <disk address> type */
 typedef char *		xfs_caddr_t;	/* <core address> type */
 typedef __u32		xfs_dev_t;
@@ -111,8 +111,6 @@ typedef __uint64_t	xfs_fileoff_t;	/* block number in a file */
 typedef __int64_t	xfs_sfiloff_t;	/* signed block number in a file */
 typedef __uint64_t	xfs_filblks_t;	/* number of blocks in a file */
-typedef __uint8_t	xfs_arch_t;	/* architecture of an xfs fs */
 /*
  * Null values for the types.
  */
...