Commit 58ae7468 authored by Richard Weinberger, committed by Theodore Ts'o

fscrypt: factor out bio specific functions

That way we can get rid of the direct dependency on CONFIG_BLOCK.
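
In practical terms: the bio-based helpers (fscrypt_decrypt_bio_pages(), fscrypt_pullback_bio_page(), fscrypt_zeroout_range()) move into a new bio.c that is built only when CONFIG_BLOCK is enabled, while the core fscrypt_encrypt_page()/fscrypt_decrypt_page() paths stay block-free. The Makefile hunk below relies on the usual kbuild idiom: fscrypto-$(CONFIG_BLOCK) += bio.o expands to fscrypto-y += bio.o when CONFIG_BLOCK=y and to the ignored fscrypto- list otherwise, so bio.o is linked into fscrypto.o only on block-enabled kernels. A filesystem that does its own I/O and sets FS_CFLG_OWN_PAGES, such as UBIFS, then needs only the in-place encryption path. A minimal, hypothetical sketch of such a caller (function name and error handling are illustrative, not taken from UBIFS, and it assumes the fscrypt_encrypt_page(inode, page, len, offs, lblk_num, gfp_flags) prototype used by this series):

#include <linux/fs.h>
#include <linux/fscrypto.h>	/* fscrypt API header as of this series */

/* Hypothetical helper in a filesystem that sets FS_CFLG_OWN_PAGES. */
static int my_fs_encrypt_block(struct inode *inode, struct page *page,
			       unsigned int len, unsigned int offs, u64 block)
{
	struct page *ret;

	/*
	 * With FS_CFLG_OWN_PAGES the page is encrypted in place: no bounce
	 * page is allocated and no struct bio is touched, so none of the
	 * code moved to bio.c is needed.
	 */
	ret = fscrypt_encrypt_page(inode, page, len, offs, block, GFP_NOFS);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	/* In the in-place case ret == page; the filesystem now writes the
	 * ciphertext out through its own I/O path instead of a bio. */
	return 0;
}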

Fixes: d475a507 ("ubifs: Add skeleton for fscrypto")
Reported-by: Arnd Bergmann <arnd@arndb.de>
Reported-by: Randy Dunlap <rdunlap@infradead.org>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Gstir <david@sigma-star.at>
Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent efee590e
fs/crypto/Kconfig:

config FS_ENCRYPTION
tristate "FS Encryption (Per-file encryption)"
-depends on BLOCK
select CRYPTO
select CRYPTO_AES
select CRYPTO_CBC
...

fs/crypto/Makefile:

obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o
fscrypto-y := crypto.o fname.o policy.o keyinfo.o
+fscrypto-$(CONFIG_BLOCK) += bio.o

fs/crypto/bio.c (new file):
/*
* This contains encryption functions for per-file encryption.
*
* Copyright (C) 2015, Google, Inc.
* Copyright (C) 2015, Motorola Mobility
*
* Written by Michael Halcrow, 2014.
*
* Filename encryption additions
* Uday Savagaonkar, 2014
* Encryption policy handling additions
* Ildar Muslukhov, 2014
* Add fscrypt_pullback_bio_page()
* Jaegeuk Kim, 2015.
*
* This has not yet undergone a rigorous security audit.
*
* The usage of AES-XTS should conform to recommendations in NIST
* Special Publication 800-38E and IEEE P1619/D16.
*/
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/namei.h>
#include "fscrypt_private.h"
/*
* Call fscrypt_decrypt_page on every single page, reusing the encryption
* context.
*/
static void completion_pages(struct work_struct *work)
{
struct fscrypt_ctx *ctx =
container_of(work, struct fscrypt_ctx, r.work);
struct bio *bio = ctx->r.bio;
struct bio_vec *bv;
int i;
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
int ret = fscrypt_decrypt_page(page->mapping->host, page,
PAGE_SIZE, 0, page->index);
if (ret) {
WARN_ON_ONCE(1);
SetPageError(page);
} else {
SetPageUptodate(page);
}
unlock_page(page);
}
fscrypt_release_ctx(ctx);
bio_put(bio);
}
void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
{
INIT_WORK(&ctx->r.work, completion_pages);
ctx->r.bio = bio;
queue_work(fscrypt_read_workqueue, &ctx->r.work);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);
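/*
 * fscrypt_pullback_bio_page() - if *page is a writeback bounce (ciphertext)
 * page, replace it with the original pagecache ("control") page it was
 * encrypted from.  When @restore is true, the bounce page and its fscrypt
 * context are also freed via fscrypt_restore_control_page().
 */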
void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
struct fscrypt_ctx *ctx;
struct page *bounce_page;
/* The bounce data pages are unmapped. */
if ((*page)->mapping)
return;
/* The bounce data page is unmapped. */
bounce_page = *page;
ctx = (struct fscrypt_ctx *)page_private(bounce_page);
/* restore control page */
*page = ctx->w.control_page;
if (restore)
fscrypt_restore_control_page(bounce_page);
}
EXPORT_SYMBOL(fscrypt_pullback_bio_page);
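/*
 * fscrypt_zeroout_range() - write encrypted zeroes over @len blocks starting
 * at logical block @lblk / physical block @pblk, so the range reads back as
 * zeroes after decryption.  Each block is encrypted from ZERO_PAGE(0) into a
 * bounce page and submitted as a single-page write bio.
 */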
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len)
{
struct fscrypt_ctx *ctx;
struct page *ciphertext_page = NULL;
struct bio *bio;
int ret, err = 0;
BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
ctx = fscrypt_get_ctx(inode, GFP_NOFS);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
ciphertext_page = fscrypt_alloc_bounce_page(ctx, GFP_NOWAIT);
if (IS_ERR(ciphertext_page)) {
err = PTR_ERR(ciphertext_page);
goto errout;
}
while (len--) {
err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk,
ZERO_PAGE(0), ciphertext_page,
PAGE_SIZE, 0, GFP_NOFS);
if (err)
goto errout;
bio = bio_alloc(GFP_NOWAIT, 1);
if (!bio) {
err = -ENOMEM;
goto errout;
}
bio->bi_bdev = inode->i_sb->s_bdev;
bio->bi_iter.bi_sector =
pblk << (inode->i_sb->s_blocksize_bits - 9);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
ret = bio_add_page(bio, ciphertext_page,
inode->i_sb->s_blocksize, 0);
if (ret != inode->i_sb->s_blocksize) {
/* should never happen! */
WARN_ON(1);
bio_put(bio);
err = -EIO;
goto errout;
}
err = submit_bio_wait(bio);
if ((err == 0) && bio->bi_error)
err = -EIO;
bio_put(bio);
if (err)
goto errout;
lblk++;
pblk++;
}
err = 0;
errout:
fscrypt_release_ctx(ctx);
return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);
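
For orientation, the helpers above are driven from a block-based filesystem's read path: the submitter allocates a fscrypt_ctx, and the read-completion handler hands the finished bio to fscrypt_decrypt_bio_pages(), which defers the actual decryption to the fscrypt read workqueue. A hedged sketch of such a handler (the function name and the bi_private convention are illustrative, loosely modeled on how ext4/f2fs drive this API rather than copied from them):

/* Hypothetical read-completion handler in a block-based filesystem. */
static void my_fs_read_endio(struct bio *bio)
{
	/* Assumption: the submitter stashed its fscrypt_ctx in bi_private. */
	struct fscrypt_ctx *ctx = bio->bi_private;

	if (bio->bi_error) {
		/* The read failed, so there is nothing to decrypt; drop the
		 * context and the bio (page unlocking elided for brevity). */
		fscrypt_release_ctx(ctx);
		bio_put(bio);
		return;
	}

	/*
	 * Defer to fscrypt's read workqueue: completion_pages() decrypts
	 * each segment, sets PageUptodate or PageError, unlocks the pages,
	 * and releases both the context and the bio.
	 */
	fscrypt_decrypt_bio_pages(ctx, bio);
}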
fs/crypto/crypto.c:

@@ -24,7 +24,6 @@
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
-#include <linux/bio.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include "fscrypt_private.h"
@@ -44,7 +43,7 @@ static mempool_t *fscrypt_bounce_page_pool = NULL;
static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);
-static struct workqueue_struct *fscrypt_read_workqueue;
+struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);
static struct kmem_cache *fscrypt_ctx_cachep;
@@ -141,16 +140,10 @@ static void page_crypt_complete(struct crypto_async_request *req, int res)
complete(&ecr->completion);
}
-typedef enum {
-FS_DECRYPT = 0,
-FS_ENCRYPT,
-} fscrypt_direction_t;
-static int do_page_crypto(const struct inode *inode,
-fscrypt_direction_t rw, u64 lblk_num,
-struct page *src_page, struct page *dest_page,
-unsigned int len, unsigned int offs,
-gfp_t gfp_flags)
+int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
+u64 lblk_num, struct page *src_page,
+struct page *dest_page, unsigned int len,
+unsigned int offs, gfp_t gfp_flags)
{
struct {
__le64 index;
@@ -205,7 +198,8 @@ static int do_page_crypto(const struct inode *inode,
return 0;
}
-static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
+struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
+gfp_t gfp_flags)
{
ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
if (ctx->w.bounce_page == NULL)
@@ -260,9 +254,9 @@ struct page *fscrypt_encrypt_page(const struct inode *inode,
if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
/* with inplace-encryption we just encrypt the page */
-err = do_page_crypto(inode, FS_ENCRYPT, lblk_num,
-page, ciphertext_page,
-len, offs, gfp_flags);
+err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
+ciphertext_page, len, offs,
+gfp_flags);
if (err)
return ERR_PTR(err);
@@ -276,14 +270,14 @@ struct page *fscrypt_encrypt_page(const struct inode *inode,
return (struct page *)ctx;
/* The encryption operation will require a bounce page. */
-ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
+ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
if (IS_ERR(ciphertext_page))
goto errout;
ctx->w.control_page = page;
-err = do_page_crypto(inode, FS_ENCRYPT, lblk_num,
-page, ciphertext_page,
-len, offs, gfp_flags);
+err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
+page, ciphertext_page, len, offs,
+gfp_flags);
if (err) {
ciphertext_page = ERR_PTR(err);
goto errout;
@@ -320,72 +314,11 @@ int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
BUG_ON(!PageLocked(page));
-return do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page, len,
-offs, GFP_NOFS);
+return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
+len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);
-int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
-sector_t pblk, unsigned int len)
-{
-struct fscrypt_ctx *ctx;
-struct page *ciphertext_page = NULL;
-struct bio *bio;
-int ret, err = 0;
-BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
-ctx = fscrypt_get_ctx(inode, GFP_NOFS);
-if (IS_ERR(ctx))
-return PTR_ERR(ctx);
-ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
-if (IS_ERR(ciphertext_page)) {
-err = PTR_ERR(ciphertext_page);
-goto errout;
-}
-while (len--) {
-err = do_page_crypto(inode, FS_ENCRYPT, lblk,
-ZERO_PAGE(0), ciphertext_page,
-PAGE_SIZE, 0, GFP_NOFS);
-if (err)
-goto errout;
-bio = bio_alloc(GFP_NOWAIT, 1);
-if (!bio) {
-err = -ENOMEM;
-goto errout;
-}
-bio->bi_bdev = inode->i_sb->s_bdev;
-bio->bi_iter.bi_sector =
-pblk << (inode->i_sb->s_blocksize_bits - 9);
-bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-ret = bio_add_page(bio, ciphertext_page,
-inode->i_sb->s_blocksize, 0);
-if (ret != inode->i_sb->s_blocksize) {
-/* should never happen! */
-WARN_ON(1);
-bio_put(bio);
-err = -EIO;
-goto errout;
-}
-err = submit_bio_wait(bio);
-if ((err == 0) && bio->bi_error)
-err = -EIO;
-bio_put(bio);
-if (err)
-goto errout;
-lblk++;
-pblk++;
-}
-err = 0;
-errout:
-fscrypt_release_ctx(ctx);
-return err;
-}
-EXPORT_SYMBOL(fscrypt_zeroout_range);
/*
* Validate dentries for encrypted directories to make sure we aren't
* potentially caching stale data after a key has been added or
@@ -442,64 +375,6 @@ const struct dentry_operations fscrypt_d_ops = {
};
EXPORT_SYMBOL(fscrypt_d_ops);
-/*
-* Call fscrypt_decrypt_page on every single page, reusing the encryption
-* context.
-*/
-static void completion_pages(struct work_struct *work)
-{
-struct fscrypt_ctx *ctx =
-container_of(work, struct fscrypt_ctx, r.work);
-struct bio *bio = ctx->r.bio;
-struct bio_vec *bv;
-int i;
-bio_for_each_segment_all(bv, bio, i) {
-struct page *page = bv->bv_page;
-int ret = fscrypt_decrypt_page(page->mapping->host, page,
-PAGE_SIZE, 0, page->index);
-if (ret) {
-WARN_ON_ONCE(1);
-SetPageError(page);
-} else {
-SetPageUptodate(page);
-}
-unlock_page(page);
-}
-fscrypt_release_ctx(ctx);
-bio_put(bio);
-}
-void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
-{
-INIT_WORK(&ctx->r.work, completion_pages);
-ctx->r.bio = bio;
-queue_work(fscrypt_read_workqueue, &ctx->r.work);
-}
-EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);
-void fscrypt_pullback_bio_page(struct page **page, bool restore)
-{
-struct fscrypt_ctx *ctx;
-struct page *bounce_page;
-/* The bounce data pages are unmapped. */
-if ((*page)->mapping)
-return;
-/* The bounce data page is unmapped. */
-bounce_page = *page;
-ctx = (struct fscrypt_ctx *)page_private(bounce_page);
-/* restore control page */
-*page = ctx->w.control_page;
-if (restore)
-fscrypt_restore_control_page(bounce_page);
-}
-EXPORT_SYMBOL(fscrypt_pullback_bio_page);
void fscrypt_restore_control_page(struct page *page)
{
struct fscrypt_ctx *ctx;
......
fs/crypto/fscrypt_private.h:

@@ -71,6 +71,11 @@ struct fscrypt_info {
u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
};
+typedef enum {
+FS_DECRYPT = 0,
+FS_ENCRYPT,
+} fscrypt_direction_t;
#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
#define FS_CTX_HAS_BOUNCE_BUFFER_FL 0x00000002
@@ -85,7 +90,16 @@ struct fscrypt_completion_result {
/* crypto.c */
-int fscrypt_initialize(unsigned int cop_flags);
+extern int fscrypt_initialize(unsigned int cop_flags);
+extern struct workqueue_struct *fscrypt_read_workqueue;
+extern int fscrypt_do_page_crypto(const struct inode *inode,
+fscrypt_direction_t rw, u64 lblk_num,
+struct page *src_page,
+struct page *dest_page,
+unsigned int len, unsigned int offs,
+gfp_t gfp_flags);
+extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
+gfp_t gfp_flags);
/* keyinfo.c */
extern int fscrypt_get_crypt_info(struct inode *);
......
include/linux/fscrypto.h:

@@ -174,11 +174,8 @@ extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
u64, gfp_t);
extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
unsigned int, u64);
-extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
-extern void fscrypt_pullback_bio_page(struct page **, bool);
extern void fscrypt_restore_control_page(struct page *);
-extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
-unsigned int);
/* policy.c */
extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
@@ -201,6 +198,12 @@ extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
const struct fscrypt_str *, struct fscrypt_str *);
extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
struct fscrypt_str *);
+/* bio.c */
+extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
+extern void fscrypt_pullback_bio_page(struct page **, bool);
+extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
+unsigned int);
#endif
/* crypto.c */
......
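
On the write side, the bounce-page helper declared in the "/* bio.c */" block above is typically called from a filesystem's writeback completion path, where each bio segment may carry a bounce (ciphertext) page that has to be mapped back to the original pagecache page before writeback is ended. A hedged sketch (names are illustrative; real callers such as ext4 split the pullback and the restore steps slightly differently):

/* Hypothetical writeback-completion handler in a block-based filesystem. */
static void my_fs_write_endio(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		/*
		 * If this segment holds a bounce page, swap in the original
		 * pagecache page and (restore == true) free the bounce page
		 * together with its fscrypt context.
		 */
		fscrypt_pullback_bio_page(&page, true);
		end_page_writeback(page);
	}
	bio_put(bio);
}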