Commit d453cc5a authored by Linus Torvalds

Merge tag 'fsverity-for-linus' of git://git.kernel.org/pub/scm/fs/fsverity/linux

Pull fsverity update from Eric Biggers:
 "Slightly improve data verification performance by eliminating an
  unnecessary lock"

* tag 'fsverity-for-linus' of git://git.kernel.org/pub/scm/fs/fsverity/linux:
  fsverity: remove hash page spin lock
parents 3bf95d56 8e43fb06
@@ -69,7 +69,6 @@ struct fsverity_info {
 	u8 file_digest[FS_VERITY_MAX_DIGEST_SIZE];
 	const struct inode *inode;
 	unsigned long *hash_block_verified;
-	spinlock_t hash_page_init_lock;
 };
 
 #define FS_VERITY_MAX_SIGNATURE_SIZE	(FS_VERITY_MAX_DESCRIPTOR_SIZE - \
......
@@ -239,7 +239,6 @@ struct fsverity_info *fsverity_create_info(const struct inode *inode,
 			err = -ENOMEM;
 			goto fail;
 		}
-		spin_lock_init(&vi->hash_page_init_lock);
 	}
 
 	return vi;
......
@@ -19,7 +19,6 @@ static struct workqueue_struct *fsverity_read_workqueue;
 static bool is_hash_block_verified(struct fsverity_info *vi, struct page *hpage,
 				   unsigned long hblock_idx)
 {
-	bool verified;
 	unsigned int blocks_per_page;
 	unsigned int i;
@@ -43,12 +42,20 @@ static bool is_hash_block_verified(struct fsverity_info *vi, struct page *hpage,
 	 * re-instantiated from the backing storage are re-verified.  To do
 	 * this, we use PG_checked again, but now it doesn't really mean
 	 * "checked".  Instead, now it just serves as an indicator for whether
-	 * the hash page is newly instantiated or not.
+	 * the hash page is newly instantiated or not.  If the page is new, as
+	 * indicated by PG_checked=0, we clear the bitmap bits for the page's
+	 * blocks since they are untrustworthy, then set PG_checked=1.
+	 * Otherwise we return the bitmap bit for the requested block.
 	 *
-	 * The first thread that sees PG_checked=0 must clear the corresponding
-	 * bitmap bits, then set PG_checked=1.  This requires a spinlock.  To
-	 * avoid having to take this spinlock in the common case of
-	 * PG_checked=1, we start with an opportunistic lockless read.
+	 * Multiple threads may execute this code concurrently on the same page.
+	 * This is safe because we use memory barriers to ensure that if a
+	 * thread sees PG_checked=1, then it also sees the associated bitmap
+	 * clearing to have occurred.  Also, all writes and their corresponding
+	 * reads are atomic, and all writes are safe to repeat in the event that
+	 * multiple threads get into the PG_checked=0 section.  (Clearing a
+	 * bitmap bit again at worst causes a hash block to be verified
+	 * redundantly.  That event should be very rare, so it's not worth using
+	 * a lock to avoid.  Setting PG_checked again has no effect.)
 	 */
 	if (PageChecked(hpage)) {
 		/*
@@ -58,24 +65,17 @@ static bool is_hash_block_verified(struct fsverity_info *vi, struct page *hpage,
 		smp_rmb();
 		return test_bit(hblock_idx, vi->hash_block_verified);
 	}
-	spin_lock(&vi->hash_page_init_lock);
-	if (PageChecked(hpage)) {
-		verified = test_bit(hblock_idx, vi->hash_block_verified);
-	} else {
-		blocks_per_page = vi->tree_params.blocks_per_page;
-		hblock_idx = round_down(hblock_idx, blocks_per_page);
-		for (i = 0; i < blocks_per_page; i++)
-			clear_bit(hblock_idx + i, vi->hash_block_verified);
-		/*
-		 * A write memory barrier is needed here to give RELEASE
-		 * semantics to the below SetPageChecked() operation.
-		 */
-		smp_wmb();
-		SetPageChecked(hpage);
-		verified = false;
-	}
-	spin_unlock(&vi->hash_page_init_lock);
-	return verified;
+	blocks_per_page = vi->tree_params.blocks_per_page;
+	hblock_idx = round_down(hblock_idx, blocks_per_page);
+	for (i = 0; i < blocks_per_page; i++)
+		clear_bit(hblock_idx + i, vi->hash_block_verified);
+	/*
+	 * A write memory barrier is needed here to give RELEASE semantics to
+	 * the below SetPageChecked() operation.
+	 */
+	smp_wmb();
+	SetPageChecked(hpage);
+	return false;
 }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment