Commit 223b2b88 authored by Steven Whitehouse, committed by Steven Whitehouse

GFS2: Fix alignment issue and tidy gfs2_bitfit

An alignment issue with the existing bitfit algorithm was reported
on IA64. This patch attempts to fix that, and also to tidy up the
code a bit. There is now more documentation about how this works
and it has survived a number of different tests.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
parent 64d576ba
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -131,82 +131,90 @@ static inline unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd,
         return cur_state;
 }
+
+/**
+ * gfs2_bit_search
+ * @ptr: Pointer to bitmap data
+ * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
+ * @state: The state we are searching for
+ *
+ * We XOR the bitmap data with a pattern which is the bitwise opposite
+ * of what we are looking for; this gives rise to a pattern of ones
+ * wherever there is a match. Since we have two bits per entry, we
+ * take this pattern, shift it down by one place and then AND it with
+ * the original. All the even bit positions (0,2,4, etc) then represent
+ * successful matches, so we mask with 0x55555..... to remove the unwanted
+ * odd bit positions.
+ *
+ * This allows searching of a whole u64 at once (32 blocks) with a
+ * single test (on 64 bit arches).
+ */
+
+static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
+{
+        u64 tmp;
+        static const u64 search[] = {
+                [0] = 0xffffffffffffffff,
+                [1] = 0xaaaaaaaaaaaaaaaa,
+                [2] = 0x5555555555555555,
+                [3] = 0x0000000000000000,
+        };
+        tmp = le64_to_cpu(*ptr) ^ search[state];
+        tmp &= (tmp >> 1);
+        tmp &= mask;
+        return tmp;
+}
+
 /**
  * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
  * a block in a given allocation state.
  * @buffer: the buffer that holds the bitmaps
- * @buflen: the length (in bytes) of the buffer
+ * @len: the length (in bytes) of the buffer
  * @goal: start search at this block's bit-pair (within @buffer)
- * @old_state: GFS2_BLKST_XXX the state of the block we're looking for.
+ * @state: GFS2_BLKST_XXX the state of the block we're looking for.
  *
  * Scope of @goal and returned block number is only within this bitmap buffer,
  * not entire rgrp or filesystem. @buffer will be offset from the actual
- * beginning of a bitmap block buffer, skipping any header structures.
+ * beginning of a bitmap block buffer, skipping any header structures, but
+ * headers are always a multiple of 64 bits long so that the buffer is
+ * always aligned to a 64 bit boundary.
+ *
+ * The size of the buffer is in bytes, but it is assumed that it is
+ * always ok to read a complete multiple of 64 bits at the end
+ * of the block in case the end is not aligned to a natural boundary.
  *
  * Return: the block number (bitmap buffer scope) that was found
  */
-static u32 gfs2_bitfit(const u8 *buffer, unsigned int buflen, u32 goal,
-                       u8 old_state)
+u32 gfs2_bitfit(const u8 *buf, const unsigned int len, u32 goal, u8 state)
 {
-        const u8 *byte, *start, *end;
-        int bit, startbit;
-        u32 g1, g2, misaligned;
-        unsigned long *plong;
-        unsigned long lskipval;
-
-        lskipval = (old_state & GFS2_BLKST_USED) ? LBITSKIP00 : LBITSKIP55;
-        g1 = (goal / GFS2_NBBY);
-        start = buffer + g1;
-        byte = start;
-        end = buffer + buflen;
-        g2 = ALIGN(g1, sizeof(unsigned long));
-        plong = (unsigned long *)(buffer + g2);
-        startbit = bit = (goal % GFS2_NBBY) * GFS2_BIT_SIZE;
-        misaligned = g2 - g1;
-        if (!misaligned)
-                goto ulong_aligned;
-/* parse the bitmap a byte at a time */
-misaligned:
-        while (byte < end) {
-                if (((*byte >> bit) & GFS2_BIT_MASK) == old_state) {
-                        return goal +
-                                (((byte - start) * GFS2_NBBY) +
-                                 ((bit - startbit) >> 1));
-                }
-                bit += GFS2_BIT_SIZE;
-                if (bit >= GFS2_NBBY * GFS2_BIT_SIZE) {
-                        bit = 0;
-                        byte++;
-                        misaligned--;
-                        if (!misaligned) {
-                                plong = (unsigned long *)byte;
-                                goto ulong_aligned;
-                        }
-                }
-        }
-        return BFITNOENT;
-
-/* parse the bitmap a unsigned long at a time */
-ulong_aligned:
-        /* Stop at "end - 1" or else prefetch can go past the end and segfault.
-           We could "if" it but we'd lose some of the performance gained.
-           This way will only slow down searching the very last 4/8 bytes
-           depending on architecture. I've experimented with several ways
-           of writing this section such as using an else before the goto
-           but this one seems to be the fastest. */
-        while ((unsigned char *)plong < end - sizeof(unsigned long)) {
-                prefetch(plong + 1);
-                if (((*plong) & LBITMASK) != lskipval)
-                        break;
-                plong++;
-        }
-        if ((unsigned char *)plong < end) {
-                byte = (const u8 *)plong;
-                misaligned += sizeof(unsigned long) - 1;
-                goto misaligned;
-        }
-        return BFITNOENT;
+        u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
+        const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
+        const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
+        u64 tmp;
+        u64 mask = 0x5555555555555555;
+        u32 bit;
+
+        BUG_ON(state > 3);
+
+        /* Mask off bits we don't care about at the start of the search */
+        mask <<= spoint;
+        tmp = gfs2_bit_search(ptr, mask, state);
+        ptr++;
+        while(tmp == 0 && ptr < end) {
+                tmp = gfs2_bit_search(ptr, 0x5555555555555555, state);
+                ptr++;
+        }
+        /* Mask off any bits which are more than len bytes from the start */
+        if (ptr == end && (len & (sizeof(u64) - 1)))
+                tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
+        /* Didn't find anything, so return */
+        if (tmp == 0)
+                return BFITNOENT;
+        ptr--;
+        bit = fls64(tmp);
+        bit--;          /* fls64 always adds one to the bit count */
+        bit /= 2;       /* two bits per entry in the bitmap */
+        return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
 }
 /**
...
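The search trick documented in the gfs2_bit_search() comment above is easy to try outside the kernel. Below is a minimal userspace sketch of the same idea; demo_bit_search() and the test word are illustrative only, and a plain uint64_t on a little-endian host stands in for the kernel's __le64/le64_to_cpu() handling.

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for gfs2_bit_search(): XORing with the bitwise
 * opposite of the wanted state turns every matching bit-pair into 11;
 * ANDing the result with itself shifted right one place leaves a 1 on
 * the even bit of each matching pair, and the 0x5555... mask clears
 * the odd bit positions. */
static uint64_t demo_bit_search(uint64_t word, uint64_t mask, uint8_t state)
{
        static const uint64_t search[] = {
                [0] = 0xffffffffffffffffULL,    /* bitwise opposite of 00 */
                [1] = 0xaaaaaaaaaaaaaaaaULL,    /* bitwise opposite of 01 */
                [2] = 0x5555555555555555ULL,    /* bitwise opposite of 10 */
                [3] = 0x0000000000000000ULL,    /* bitwise opposite of 11 */
        };
        uint64_t tmp = word ^ search[state];
        tmp &= (tmp >> 1);
        return tmp & mask;
}

int main(void)
{
        /* 32 two-bit entries, all state 0 except entry 3 (bits 6-7) = state 2 */
        uint64_t word = 2ULL << 6;
        uint64_t hits = demo_bit_search(word, 0x5555555555555555ULL, 2);

        printf("hits = %#llx\n", (unsigned long long)hits);
        return 0;
}

Run, this prints hits = 0x40: a single 1 on even bit 6, i.e. matching entry 6/2 = 3, so one comparison has checked all 32 block states in the word.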
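In the same spirit, here is a hedged userspace sketch of the whole-buffer walk the new gfs2_bitfit() performs: start mask, 64-bit stride, end mask, then bit-to-block conversion. demo_bitfit(), DEMO_NOENT and the 16-byte bitmap are made up for illustration; __builtin_clzll stands in for the kernel's fls64(), GFS2_NBBY (4 entries per byte) is written as a literal 4, and the buffer is explicitly 64-bit aligned to match the guarantee described in the comment block above.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_NOENT 0xffffffffu          /* stands in for BFITNOENT */

/* Same primitive as the previous sketch, repeated so this compiles alone. */
static uint64_t demo_bit_search(uint64_t word, uint64_t mask, uint8_t state)
{
        static const uint64_t search[] = {
                0xffffffffffffffffULL, 0xaaaaaaaaaaaaaaaaULL,
                0x5555555555555555ULL, 0x0000000000000000ULL,
        };
        uint64_t tmp = word ^ search[state];
        tmp &= (tmp >> 1);
        return tmp & mask;
}

/* Userspace mirror of the new gfs2_bitfit(); little-endian host assumed,
 * and the buffer length is rounded up so whole u64 reads are safe. */
static uint32_t demo_bitfit(const uint8_t *buf, unsigned int len,
                            uint32_t goal, uint8_t state)
{
        uint32_t spoint = (goal << 1) & 63;     /* bit offset in first word */
        const uint64_t *ptr = (const uint64_t *)buf + (goal >> 5);
        const uint64_t *end = (const uint64_t *)(buf + ((len + 7) & ~7u));
        uint64_t mask = 0x5555555555555555ULL << spoint;
        uint64_t tmp = demo_bit_search(*ptr++, mask, state);
        uint32_t bit;

        while (tmp == 0 && ptr < end)
                tmp = demo_bit_search(*ptr++, 0x5555555555555555ULL, state);
        /* Drop matches that lie beyond len bytes in the final, padded word */
        if (ptr == end && (len & 7))
                tmp &= ((uint64_t)~0) >> (64 - 8 * (len & 7));
        if (tmp == 0)
                return DEMO_NOENT;
        ptr--;
        /* 63 - clz equals fls64(tmp) - 1: highest set bit, zero-indexed */
        bit = (uint32_t)(63 - __builtin_clzll(tmp)) / 2;
        return (uint32_t)((const uint8_t *)ptr - buf) * 4 + bit;
}

int main(void)
{
        _Alignas(uint64_t) uint8_t bitmap[16];

        memset(bitmap, 0xff, sizeof(bitmap));   /* every entry in state 3 */
        bitmap[9] = 0xf2;                       /* entry 36 (bits 0-1) -> state 2 */
        printf("found block %u\n",
               (unsigned)demo_bitfit(bitmap, sizeof(bitmap), 0, 2));
        return 0;
}

Compiled with gcc -std=c11 -fno-strict-aliasing demo.c (the kernel is built with the same aliasing option and type-puns the buffer the same way), this prints found block 36. Note that fls64() reports the most significant set bit, so if several entries within one 64-bit word match, the highest-numbered one is the one returned.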