Commit 25cd6f35 authored by Linus Torvalds

Merge tag 'fscrypt-for-linus' of git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt

Pull fscrypt updates from Eric Biggers:

 - Preparations for supporting encryption on ext4 filesystems where the
   filesystem block size is smaller than PAGE_SIZE.

 - Don't allow setting encryption policies on dead directories.

 - Various cleanups.

* tag 'fscrypt-for-linus' of git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt:
  fscrypt: document testing with xfstests
  fscrypt: remove selection of CONFIG_CRYPTO_SHA256
  fscrypt: remove unnecessary includes of ratelimit.h
  fscrypt: don't set policy for a dead directory
  ext4: encrypt only up to last block in ext4_bio_write_page()
  ext4: decrypt only the needed block in __ext4_block_zero_page_range()
  ext4: decrypt only the needed blocks in ext4_block_write_begin()
  ext4: clear BH_Uptodate flag on decryption error
  fscrypt: decrypt only the needed blocks in __fscrypt_decrypt_bio()
  fscrypt: support decrypting multiple filesystem blocks per page
  fscrypt: introduce fscrypt_decrypt_block_inplace()
  fscrypt: handle blocksize < PAGE_SIZE in fscrypt_zeroout_range()
  fscrypt: support encrypting multiple filesystem blocks per page
  fscrypt: introduce fscrypt_encrypt_block_inplace()
  fscrypt: clean up some BUG_ON()s in block encryption/decryption
  fscrypt: rename fscrypt_do_page_crypto() to fscrypt_crypt_block()
  fscrypt: remove the "write" part of struct fscrypt_ctx
  fscrypt: simplify bounce page handling
parents 40f06c79 05643363
Documentation/filesystems/fscrypt.rst
@@ -191,7 +191,9 @@ Currently, the following pairs of encryption modes are supported:
 If unsure, you should use the (AES-256-XTS, AES-256-CTS-CBC) pair.
 
 AES-128-CBC was added only for low-powered embedded devices with
-crypto accelerators such as CAAM or CESA that do not support XTS.
+crypto accelerators such as CAAM or CESA that do not support XTS.  To
+use AES-128-CBC, CONFIG_CRYPTO_SHA256 (or another SHA-256
+implementation) must be enabled so that ESSIV can be used.
 
 Adiantum is a (primarily) stream cipher-based mode that is fast even
 on CPUs without dedicated crypto instructions.  It's also a true
@@ -647,3 +649,42 @@ Note that the precise way that filenames are presented to userspace
 without the key is subject to change in the future.  It is only meant
 as a way to temporarily present valid filenames so that commands like
 ``rm -r`` work as expected on encrypted directories.
+
+Tests
+=====
+
+To test fscrypt, use xfstests, which is Linux's de facto standard
+filesystem test suite.  First, run all the tests in the "encrypt"
+group on the relevant filesystem(s).  For example, to test ext4 and
+f2fs encryption using `kvm-xfstests
+<https://github.com/tytso/xfstests-bld/blob/master/Documentation/kvm-quickstart.md>`_::
+
+    kvm-xfstests -c ext4,f2fs -g encrypt
+
+UBIFS encryption can also be tested this way, but it should be done in
+a separate command, and it takes some time for kvm-xfstests to set up
+emulated UBI volumes::
+
+    kvm-xfstests -c ubifs -g encrypt
+
+No tests should fail.  However, tests that use non-default encryption
+modes (e.g. generic/549 and generic/550) will be skipped if the needed
+algorithms were not built into the kernel's crypto API.  Also, tests
+that access the raw block device (e.g. generic/399, generic/548,
+generic/549, generic/550) will be skipped on UBIFS.
+
+Besides running the "encrypt" group tests, for ext4 and f2fs it's also
+possible to run most xfstests with the "test_dummy_encryption" mount
+option.  This option causes all new files to be automatically
+encrypted with a dummy key, without having to make any API calls.
+This tests the encrypted I/O paths more thoroughly.  To do this with
+kvm-xfstests, use the "encrypt" filesystem configuration::
+
+    kvm-xfstests -c ext4/encrypt,f2fs/encrypt -g auto
+
+Because this runs many more tests than "-g encrypt" does, it takes
+much longer to run; so also consider using `gce-xfstests
+<https://github.com/tytso/xfstests-bld/blob/master/Documentation/gce-xfstests.md>`_
+instead of kvm-xfstests::
+
+    gce-xfstests -c ext4/encrypt,f2fs/encrypt -g auto
fs/crypto/Kconfig
@@ -7,7 +7,6 @@ config FS_ENCRYPTION
 	select CRYPTO_ECB
 	select CRYPTO_XTS
 	select CRYPTO_CTS
-	select CRYPTO_SHA256
 	select KEYS
 	help
 	  Enable encryption of files and directories.  This
fs/crypto/bio.c
@@ -33,9 +33,8 @@ static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
 	bio_for_each_segment_all(bv, bio, iter_all) {
 		struct page *page = bv->bv_page;
-		int ret = fscrypt_decrypt_page(page->mapping->host, page,
-				PAGE_SIZE, 0, page->index);
-
+		int ret = fscrypt_decrypt_pagecache_blocks(page, bv->bv_len,
+							   bv->bv_offset);
 		if (ret)
 			SetPageError(page);
 		else if (done)
@@ -53,9 +52,8 @@ EXPORT_SYMBOL(fscrypt_decrypt_bio);
 static void completion_pages(struct work_struct *work)
 {
-	struct fscrypt_ctx *ctx =
-		container_of(work, struct fscrypt_ctx, r.work);
-	struct bio *bio = ctx->r.bio;
+	struct fscrypt_ctx *ctx = container_of(work, struct fscrypt_ctx, work);
+	struct bio *bio = ctx->bio;
 
 	__fscrypt_decrypt_bio(bio, true);
 	fscrypt_release_ctx(ctx);
@@ -64,57 +62,29 @@ static void completion_pages(struct work_struct *work)
 void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)
 {
-	INIT_WORK(&ctx->r.work, completion_pages);
-	ctx->r.bio = bio;
-	fscrypt_enqueue_decrypt_work(&ctx->r.work);
+	INIT_WORK(&ctx->work, completion_pages);
+	ctx->bio = bio;
+	fscrypt_enqueue_decrypt_work(&ctx->work);
 }
 EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio);
 
-void fscrypt_pullback_bio_page(struct page **page, bool restore)
-{
-	struct fscrypt_ctx *ctx;
-	struct page *bounce_page;
-
-	/* The bounce data pages are unmapped. */
-	if ((*page)->mapping)
-		return;
-
-	/* The bounce data page is unmapped. */
-	bounce_page = *page;
-	ctx = (struct fscrypt_ctx *)page_private(bounce_page);
-
-	/* restore control page */
-	*page = ctx->w.control_page;
-
-	if (restore)
-		fscrypt_restore_control_page(bounce_page);
-}
-EXPORT_SYMBOL(fscrypt_pullback_bio_page);
-
 int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
 			  sector_t pblk, unsigned int len)
 {
-	struct fscrypt_ctx *ctx;
-	struct page *ciphertext_page = NULL;
+	const unsigned int blockbits = inode->i_blkbits;
+	const unsigned int blocksize = 1 << blockbits;
+	struct page *ciphertext_page;
 	struct bio *bio;
 	int ret, err = 0;
 
-	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
-
-	ctx = fscrypt_get_ctx(GFP_NOFS);
-	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
-
-	ciphertext_page = fscrypt_alloc_bounce_page(ctx, GFP_NOWAIT);
-	if (IS_ERR(ciphertext_page)) {
-		err = PTR_ERR(ciphertext_page);
-		goto errout;
-	}
+	ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT);
+	if (!ciphertext_page)
+		return -ENOMEM;
 
 	while (len--) {
-		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk,
-					     ZERO_PAGE(0), ciphertext_page,
-					     PAGE_SIZE, 0, GFP_NOFS);
+		err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
+					  ZERO_PAGE(0), ciphertext_page,
+					  blocksize, 0, GFP_NOFS);
 		if (err)
 			goto errout;
@@ -124,14 +94,11 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
 			goto errout;
 		}
 		bio_set_dev(bio, inode->i_sb->s_bdev);
-		bio->bi_iter.bi_sector =
-			pblk << (inode->i_sb->s_blocksize_bits - 9);
+		bio->bi_iter.bi_sector = pblk << (blockbits - 9);
 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-		ret = bio_add_page(bio, ciphertext_page,
-					inode->i_sb->s_blocksize, 0);
-		if (ret != inode->i_sb->s_blocksize) {
+		ret = bio_add_page(bio, ciphertext_page, blocksize, 0);
+		if (WARN_ON(ret != blocksize)) {
 			/* should never happen! */
-			WARN_ON(1);
 			bio_put(bio);
 			err = -EIO;
 			goto errout;
@@ -147,7 +114,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
 	}
 	err = 0;
 errout:
-	fscrypt_release_ctx(ctx);
+	fscrypt_free_bounce_page(ciphertext_page);
 	return err;
 }
 EXPORT_SYMBOL(fscrypt_zeroout_range);
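
The ownership rule after this series: a bounce page is a plain page that the caller owns and frees directly with fscrypt_free_bounce_page(), with no fscrypt_ctx involved. Below is a minimal sketch of the pattern the reworked fscrypt_zeroout_range() follows. It is illustrative only: fscrypt_alloc_bounce_page() and fscrypt_crypt_block() are private to fs/crypto/, and the bio submission done by the real function is elided here.

/*
 * Illustrative sketch only (not buildable outside fs/crypto/): encrypt
 * one block of zeroes into a caller-owned bounce page, then free it.
 */
#include "fscrypt_private.h"

static int zero_out_one_block(const struct inode *inode, u64 lblk,
			      unsigned int blocksize)
{
	struct page *ciphertext_page;
	int err;

	/* Now returns NULL on failure (not ERR_PTR) and takes no ctx. */
	ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT);
	if (!ciphertext_page)
		return -ENOMEM;

	/* Encrypt a single filesystem block, which may be < PAGE_SIZE. */
	err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk, ZERO_PAGE(0),
				  ciphertext_page, blocksize, 0, GFP_NOFS);

	/* ... write ciphertext_page out via a bio here ... */

	/* The caller frees the bounce page directly. */
	fscrypt_free_bounce_page(ciphertext_page);
	return err;
}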
fs/crypto/crypto.c (this diff is collapsed)
fs/crypto/fname.c
@@ -12,7 +12,6 @@
  */
 
 #include <linux/scatterlist.h>
-#include <linux/ratelimit.h>
 #include <crypto/skcipher.h>
 #include "fscrypt_private.h"
fs/crypto/fscrypt_private.h
@@ -94,7 +94,6 @@ typedef enum {
 } fscrypt_direction_t;
 
 #define FS_CTX_REQUIRES_FREE_ENCRYPT_FL		0x00000001
-#define FS_CTX_HAS_BOUNCE_BUFFER_FL		0x00000002
 
 static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
 					   u32 filenames_mode)
@@ -117,14 +116,12 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
 /* crypto.c */
 extern struct kmem_cache *fscrypt_info_cachep;
 extern int fscrypt_initialize(unsigned int cop_flags);
-extern int fscrypt_do_page_crypto(const struct inode *inode,
-				  fscrypt_direction_t rw, u64 lblk_num,
-				  struct page *src_page,
-				  struct page *dest_page,
-				  unsigned int len, unsigned int offs,
-				  gfp_t gfp_flags);
-extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
-					      gfp_t gfp_flags);
+extern int fscrypt_crypt_block(const struct inode *inode,
+			       fscrypt_direction_t rw, u64 lblk_num,
+			       struct page *src_page, struct page *dest_page,
+			       unsigned int len, unsigned int offs,
+			       gfp_t gfp_flags);
+extern struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags);
 extern const struct dentry_operations fscrypt_d_ops;
 
 extern void __printf(3, 4) __cold
fs/crypto/hooks.c
@@ -5,7 +5,6 @@
  * Encryption hooks for higher-level filesystem operations.
  */
 
-#include <linux/ratelimit.h>
 #include "fscrypt_private.h"
 
 /**
fs/crypto/keyinfo.c
@@ -12,7 +12,6 @@
 #include <keys/user-type.h>
 #include <linux/hashtable.h>
 #include <linux/scatterlist.h>
-#include <linux/ratelimit.h>
 #include <crypto/aes.h>
 #include <crypto/algapi.h>
 #include <crypto/sha.h>
fs/crypto/policy.c
@@ -81,6 +81,8 @@ int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg)
 	if (ret == -ENODATA) {
 		if (!S_ISDIR(inode->i_mode))
 			ret = -ENOTDIR;
+		else if (IS_DEADDIR(inode))
+			ret = -ENOENT;
 		else if (!inode->i_sb->s_cop->empty_dir(inode))
 			ret = -ENOTEMPTY;
 		else
fs/ext4/inode.c
@@ -1164,8 +1164,9 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
 	int err = 0;
 	unsigned blocksize = inode->i_sb->s_blocksize;
 	unsigned bbits;
-	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
-	bool decrypt = false;
+	struct buffer_head *bh, *head, *wait[2];
+	int nr_wait = 0;
+	int i;
 
 	BUG_ON(!PageLocked(page));
 	BUG_ON(from > PAGE_SIZE);
@@ -1217,23 +1218,32 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
 		    !buffer_unwritten(bh) &&
 		    (block_start < from || block_end > to)) {
 			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
-			*wait_bh++ = bh;
-			decrypt = IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
+			wait[nr_wait++] = bh;
 		}
 	}
 	/*
 	 * If we issued read requests, let them complete.
 	 */
-	while (wait_bh > wait) {
-		wait_on_buffer(*--wait_bh);
-		if (!buffer_uptodate(*wait_bh))
+	for (i = 0; i < nr_wait; i++) {
+		wait_on_buffer(wait[i]);
+		if (!buffer_uptodate(wait[i]))
 			err = -EIO;
 	}
-	if (unlikely(err))
+	if (unlikely(err)) {
 		page_zero_new_buffers(page, from, to);
-	else if (decrypt)
-		err = fscrypt_decrypt_page(page->mapping->host, page,
-				PAGE_SIZE, 0, page->index);
+	} else if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
+		for (i = 0; i < nr_wait; i++) {
+			int err2;
+
+			err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
+								bh_offset(wait[i]));
+			if (err2) {
+				clear_buffer_uptodate(wait[i]);
+				err = err2;
+			}
+		}
+	}
+
 	return err;
 }
 #endif
@@ -4065,9 +4075,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
 		if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
 			/* We expect the key to be set. */
 			BUG_ON(!fscrypt_has_encryption_key(inode));
-			BUG_ON(blocksize != PAGE_SIZE);
-			WARN_ON_ONCE(fscrypt_decrypt_page(page->mapping->host,
-						page, PAGE_SIZE, 0, page->index));
+			WARN_ON_ONCE(fscrypt_decrypt_pagecache_blocks(
+					page, blocksize, bh_offset(bh)));
 		}
 	}
 	if (ext4_should_journal_data(inode)) {
fs/ext4/page-io.c
@@ -66,9 +66,7 @@ static void ext4_finish_bio(struct bio *bio)
 	bio_for_each_segment_all(bvec, bio, iter_all) {
 		struct page *page = bvec->bv_page;
-#ifdef CONFIG_FS_ENCRYPTION
-		struct page *data_page = NULL;
-#endif
+		struct page *bounce_page = NULL;
 		struct buffer_head *bh, *head;
 		unsigned bio_start = bvec->bv_offset;
 		unsigned bio_end = bio_start + bvec->bv_len;
@@ -78,13 +76,10 @@ static void ext4_finish_bio(struct bio *bio)
 		if (!page)
 			continue;
 
-#ifdef CONFIG_FS_ENCRYPTION
-		if (!page->mapping) {
-			/* The bounce data pages are unmapped. */
-			data_page = page;
-			fscrypt_pullback_bio_page(&page, false);
+		if (fscrypt_is_bounce_page(page)) {
+			bounce_page = page;
+			page = fscrypt_pagecache_page(bounce_page);
 		}
-#endif
 
 		if (bio->bi_status) {
 			SetPageError(page);
@@ -111,10 +106,7 @@ static void ext4_finish_bio(struct bio *bio)
 		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
 		local_irq_restore(flags);
 		if (!under_io) {
-#ifdef CONFIG_FS_ENCRYPTION
-			if (data_page)
-				fscrypt_restore_control_page(data_page);
-#endif
+			fscrypt_free_bounce_page(bounce_page);
 			end_page_writeback(page);
 		}
 	}
@@ -415,7 +407,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 			struct writeback_control *wbc,
 			bool keep_towrite)
 {
-	struct page *data_page = NULL;
+	struct page *bounce_page = NULL;
 	struct inode *inode = page->mapping->host;
 	unsigned block_start;
 	struct buffer_head *bh, *head;
@@ -475,14 +467,22 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 	bh = head = page_buffers(page);
 
 	/*
 	 * If any blocks are being written to an encrypted file, encrypt them
-	 * into a bounce page.  For simplicity, just encrypt the entire page.
+	 * into a bounce page.  For simplicity, just encrypt until the last
+	 * block which might be needed.  This may cause some unneeded blocks
+	 * (e.g. holes) to be unnecessarily encrypted, but this is rare and
+	 * can't happen in the common case of blocksize == PAGE_SIZE.
 	 */
 	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) && nr_to_submit) {
 		gfp_t gfp_flags = GFP_NOFS;
+		unsigned int enc_bytes = round_up(len, i_blocksize(inode));
 
 	retry_encrypt:
-		data_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
-						 page->index, gfp_flags);
-		if (IS_ERR(data_page)) {
-			ret = PTR_ERR(data_page);
+		bounce_page = fscrypt_encrypt_pagecache_blocks(page, enc_bytes,
+							       0, gfp_flags);
+		if (IS_ERR(bounce_page)) {
+			ret = PTR_ERR(bounce_page);
 			if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
 				if (io->io_bio) {
 					ext4_io_submit(io);
@@ -491,7 +491,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 				gfp_flags |= __GFP_NOFAIL;
 				goto retry_encrypt;
 			}
-			data_page = NULL;
+			bounce_page = NULL;
 			goto out;
 		}
 	}
@@ -500,8 +500,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 	do {
 		if (!buffer_async_write(bh))
 			continue;
-		ret = io_submit_add_bh(io, inode,
-				       data_page ? data_page : page, bh);
+		ret = io_submit_add_bh(io, inode, bounce_page ?: page, bh);
 		if (ret) {
 			/*
 			 * We only get here on ENOMEM.  Not much else
@@ -517,8 +516,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 	/* Error stopped previous loop? Clean up buffers... */
 	if (ret) {
 	out:
-		if (data_page)
-			fscrypt_restore_control_page(data_page);
+		fscrypt_free_bounce_page(bounce_page);
 		printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
 		redirty_page_for_writepage(wbc, page);
 		do {
fs/f2fs/data.c
@@ -185,7 +185,7 @@ static void f2fs_write_end_io(struct bio *bio)
 			continue;
 		}
 
-		fscrypt_pullback_bio_page(&page, true);
+		fscrypt_finalize_bounce_page(&page);
 
 		if (unlikely(bio->bi_status)) {
 			mapping_set_error(page->mapping, -EIO);
@@ -362,10 +362,9 @@ static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
 	bio_for_each_segment_all(bvec, io->bio, iter_all) {
 
-		if (bvec->bv_page->mapping)
-			target = bvec->bv_page;
-		else
-			target = fscrypt_control_page(bvec->bv_page);
+		target = bvec->bv_page;
+		if (fscrypt_is_bounce_page(target))
+			target = fscrypt_pagecache_page(target);
 
 		if (inode && inode == target->mapping->host)
 			return true;
@@ -1727,8 +1726,9 @@ static int encrypt_one_page(struct f2fs_io_info *fio)
 	f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
 
 retry_encrypt:
-	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
-			PAGE_SIZE, 0, fio->page->index, gfp_flags);
+	fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(fio->page,
+							       PAGE_SIZE, 0,
+							       gfp_flags);
 	if (IS_ERR(fio->encrypted_page)) {
 		/* flush pending IOs and wait for a while in the ENOMEM case */
 		if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
@@ -1900,8 +1900,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
 		err = f2fs_inplace_write_data(fio);
 		if (err) {
 			if (f2fs_encrypted_file(inode))
-				fscrypt_pullback_bio_page(&fio->encrypted_page,
-							  true);
+				fscrypt_finalize_bounce_page(&fio->encrypted_page);
 			if (PageWriteback(page))
 				end_page_writeback(page);
 		} else {
fs/ubifs/crypto.c
@@ -29,8 +29,8 @@ int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn,
 {
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	void *p = &dn->data;
-	struct page *ret;
 	unsigned int pad_len = round_up(in_len, UBIFS_CIPHER_BLOCK_SIZE);
+	int err;
 
 	ubifs_assert(c, pad_len <= *out_len);
 	dn->compr_size = cpu_to_le16(in_len);
@@ -39,11 +39,11 @@ int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn,
 	if (pad_len != in_len)
 		memset(p + in_len, 0, pad_len - in_len);
 
-	ret = fscrypt_encrypt_page(inode, virt_to_page(&dn->data), pad_len,
-				   offset_in_page(&dn->data), block, GFP_NOFS);
-	if (IS_ERR(ret)) {
-		ubifs_err(c, "fscrypt_encrypt_page failed: %ld", PTR_ERR(ret));
-		return PTR_ERR(ret);
+	err = fscrypt_encrypt_block_inplace(inode, virt_to_page(p), pad_len,
+					    offset_in_page(p), block, GFP_NOFS);
+	if (err) {
+		ubifs_err(c, "fscrypt_encrypt_block_inplace() failed: %d", err);
+		return err;
 	}
 	*out_len = pad_len;
@@ -64,10 +64,11 @@ int ubifs_decrypt(const struct inode *inode, struct ubifs_data_node *dn,
 	}
 
 	ubifs_assert(c, dlen <= UBIFS_BLOCK_SIZE);
-	err = fscrypt_decrypt_page(inode, virt_to_page(&dn->data), dlen,
-			offset_in_page(&dn->data), block);
+	err = fscrypt_decrypt_block_inplace(inode, virt_to_page(&dn->data),
+					    dlen, offset_in_page(&dn->data),
+					    block);
 	if (err) {
-		ubifs_err(c, "fscrypt_decrypt_page failed: %i", err);
+		ubifs_err(c, "fscrypt_decrypt_block_inplace() failed: %d", err);
 		return err;
 	}
 	*out_len = clen;
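
The _inplace() helpers used above are intended for filesystems that, like UBIFS, keep file data in their own buffers rather than in pagecache pages, so no bounce page is involved. A hedged sketch of the calling convention follows, with a hypothetical demo_data_node standing in for UBIFS's ubifs_data_node:

#include <linux/fscrypt.h>
#include <linux/mm.h>

/* Hypothetical on-medium buffer layout, standing in for ubifs_data_node. */
struct demo_data_node {
	u8 data[4096];
};

/* Encrypt @len bytes of dn->data in place; @block is the logical block
 * number that fscrypt uses to derive the IV. */
static int demo_encrypt_block(const struct inode *inode,
			      struct demo_data_node *dn,
			      unsigned int len, u64 block)
{
	/* The caller pads @len up to the cipher block size beforehand,
	 * as ubifs_encrypt() does with UBIFS_CIPHER_BLOCK_SIZE. */
	return fscrypt_encrypt_block_inplace(inode, virt_to_page(&dn->data),
					     len, offset_in_page(&dn->data),
					     block, GFP_NOFS);
}

/* Decrypt in place; on success the plaintext replaces the ciphertext. */
static int demo_decrypt_block(const struct inode *inode,
			      struct demo_data_node *dn,
			      unsigned int len, u64 block)
{
	return fscrypt_decrypt_block_inplace(inode, virt_to_page(&dn->data),
					     len, offset_in_page(&dn->data),
					     block);
}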
include/linux/fscrypt.h
@@ -63,16 +63,13 @@ struct fscrypt_operations {
 	unsigned int max_namelen;
 };
 
+/* Decryption work */
 struct fscrypt_ctx {
 	union {
 		struct {
-			struct page *bounce_page;	/* Ciphertext page */
-			struct page *control_page;	/* Original page */
-		} w;
-		struct {
 			struct bio *bio;
 			struct work_struct work;
-		} r;
+		};
 		struct list_head free_list;	/* Free list */
 	};
 	u8 flags;				/* Flags */
@@ -106,18 +103,33 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry)
 extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
 extern struct fscrypt_ctx *fscrypt_get_ctx(gfp_t);
 extern void fscrypt_release_ctx(struct fscrypt_ctx *);
-extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
-						unsigned int, unsigned int,
-						u64, gfp_t);
-extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
-				unsigned int, u64);
 
-static inline struct page *fscrypt_control_page(struct page *page)
+extern struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
+						     unsigned int len,
+						     unsigned int offs,
+						     gfp_t gfp_flags);
+extern int fscrypt_encrypt_block_inplace(const struct inode *inode,
+					 struct page *page, unsigned int len,
+					 unsigned int offs, u64 lblk_num,
+					 gfp_t gfp_flags);
+
+extern int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
+					    unsigned int offs);
+extern int fscrypt_decrypt_block_inplace(const struct inode *inode,
+					 struct page *page, unsigned int len,
+					 unsigned int offs, u64 lblk_num);
+
+static inline bool fscrypt_is_bounce_page(struct page *page)
+{
+	return page->mapping == NULL;
+}
+
+static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
 {
-	return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
+	return (struct page *)page_private(bounce_page);
 }
 
-extern void fscrypt_restore_control_page(struct page *);
+extern void fscrypt_free_bounce_page(struct page *bounce_page);
 
 /* policy.c */
 extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
@@ -223,7 +235,6 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
 extern void fscrypt_decrypt_bio(struct bio *);
 extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
 					struct bio *bio);
-extern void fscrypt_pullback_bio_page(struct page **, bool);
 extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
 				 unsigned int);
@@ -283,32 +294,51 @@ static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
 	return;
 }
 
-static inline struct page *fscrypt_encrypt_page(const struct inode *inode,
+static inline struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
+							    unsigned int len,
+							    unsigned int offs,
+							    gfp_t gfp_flags)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int fscrypt_encrypt_block_inplace(const struct inode *inode,
 						struct page *page,
 						unsigned int len,
-						unsigned int offs,
-						u64 lblk_num, gfp_t gfp_flags)
+						unsigned int offs, u64 lblk_num,
+						gfp_t gfp_flags)
 {
-	return ERR_PTR(-EOPNOTSUPP);
+	return -EOPNOTSUPP;
 }
 
-static inline int fscrypt_decrypt_page(const struct inode *inode,
-				       struct page *page,
-				       unsigned int len, unsigned int offs,
-				       u64 lblk_num)
+static inline int fscrypt_decrypt_pagecache_blocks(struct page *page,
+						   unsigned int len,
+						   unsigned int offs)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_decrypt_block_inplace(const struct inode *inode,
+						struct page *page,
+						unsigned int len,
+						unsigned int offs, u64 lblk_num)
 {
 	return -EOPNOTSUPP;
 }
 
-static inline struct page *fscrypt_control_page(struct page *page)
+static inline bool fscrypt_is_bounce_page(struct page *page)
+{
+	return false;
+}
+
+static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
 {
 	WARN_ON_ONCE(1);
 	return ERR_PTR(-EINVAL);
 }
 
-static inline void fscrypt_restore_control_page(struct page *page)
+static inline void fscrypt_free_bounce_page(struct page *bounce_page)
 {
 	return;
 }
@@ -410,11 +440,6 @@ static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
 {
 }
 
-static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
-{
-	return;
-}
-
 static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
 					sector_t pblk, unsigned int len)
 {
@@ -692,4 +717,15 @@ static inline int fscrypt_encrypt_symlink(struct inode *inode,
 	return 0;
 }
 
+/* If *pagep is a bounce page, free it and set *pagep to the pagecache page */
+static inline void fscrypt_finalize_bounce_page(struct page **pagep)
+{
+	struct page *page = *pagep;
+
+	if (fscrypt_is_bounce_page(page)) {
+		*pagep = fscrypt_pagecache_page(page);
+		fscrypt_free_bounce_page(page);
+	}
+}
+
 #endif	/* _LINUX_FSCRYPT_H */
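
Taken together, the renamed interface reduces a filesystem's encrypted writeback path to the pattern below. This is a hedged sketch rather than any in-tree caller: the demo_* names are hypothetical, and real users (see the ext4 and f2fs hunks above) additionally retry -ENOMEM allocations with __GFP_NOFAIL and encrypt only up to the last block that is actually needed.

#include <linux/fscrypt.h>
#include <linux/pagemap.h>

/* Writeback: encrypt the pagecache page's blocks into a bounce page
 * and submit the bounce page for I/O instead of the original. */
static int demo_write_page(struct page *page)
{
	struct page *bounce_page;

	bounce_page = fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE, 0,
						       GFP_NOFS);
	if (IS_ERR(bounce_page))
		return PTR_ERR(bounce_page);

	/* ... add bounce_page to a bio and submit it ... */
	return 0;
}

/* Write completion: if the bio carried a bounce page, free it and
 * recover the original pagecache page before ending writeback. */
static void demo_write_end_io(struct page *page)
{
	fscrypt_finalize_bounce_page(&page);
	end_page_writeback(page);
}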