Commit bea9a6d2 authored by Linus Torvalds

Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2

* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2:
  ocfs2: Silence gcc warning in ocfs2_write_zero_page().
  jbd2/ocfs2: Fix block checksumming when a buffer is used in several transactions
  ocfs2/dlm: Remove BUG_ON from migration in the rare case of a down node
  ocfs2: Don't duplicate pages past i_size during CoW.
  ocfs2: tighten up strlen() checking
  ocfs2: Make xattr reflink work with new local alloc reservation.
  ocfs2: make xattr extension work with new local alloc reservation.
  ocfs2: Remove the redundant cpu_to_le64.
  ocfs2/dlm: don't access beyond bitmap size
  ocfs2: No need to zero pages past i_size.
  ocfs2: Zero the tail cluster when extending past i_size.
  ocfs2: When zero extending, do it by page.
  ocfs2: Limit default local alloc size within bitmap range.
  ocfs2: Move orphan scan work to ocfs2_wq.
  fs/ocfs2/dlm: Add missing spin_unlock
parents cd9f040d 5453258d
@@ -297,7 +297,6 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
 	struct page *new_page;
 	unsigned int new_offset;
 	struct buffer_head *bh_in = jh2bh(jh_in);
-	struct jbd2_buffer_trigger_type *triggers;
 	journal_t *journal = transaction->t_journal;
 
 	/*
@@ -328,21 +327,21 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
 		done_copy_out = 1;
 		new_page = virt_to_page(jh_in->b_frozen_data);
 		new_offset = offset_in_page(jh_in->b_frozen_data);
-		triggers = jh_in->b_frozen_triggers;
 	} else {
 		new_page = jh2bh(jh_in)->b_page;
 		new_offset = offset_in_page(jh2bh(jh_in)->b_data);
-		triggers = jh_in->b_triggers;
 	}
 
 	mapped_data = kmap_atomic(new_page, KM_USER0);
 	/*
-	 * Fire any commit trigger.  Do this before checking for escaping,
-	 * as the trigger may modify the magic offset.  If a copy-out
-	 * happens afterwards, it will have the correct data in the buffer.
+	 * Fire data frozen trigger if data already wasn't frozen.  Do this
+	 * before checking for escaping, as the trigger may modify the magic
+	 * offset.  If a copy-out happens afterwards, it will have the correct
+	 * data in the buffer.
 	 */
-	jbd2_buffer_commit_trigger(jh_in, mapped_data + new_offset,
-				   triggers);
+	if (!done_copy_out)
+		jbd2_buffer_frozen_trigger(jh_in, mapped_data + new_offset,
+					   jh_in->b_triggers);
 
 	/*
 	 * Check for escaping
...
@@ -725,6 +725,9 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
 			page = jh2bh(jh)->b_page;
 			offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
 			source = kmap_atomic(page, KM_USER0);
+			/* Fire data frozen trigger just before we copy the data */
+			jbd2_buffer_frozen_trigger(jh, source + offset,
+						   jh->b_triggers);
 			memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
 			kunmap_atomic(source, KM_USER0);
@@ -963,15 +966,15 @@ void jbd2_journal_set_triggers(struct buffer_head *bh,
 	jh->b_triggers = type;
 }
 
-void jbd2_buffer_commit_trigger(struct journal_head *jh, void *mapped_data,
+void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
 				struct jbd2_buffer_trigger_type *triggers)
 {
 	struct buffer_head *bh = jh2bh(jh);
 
-	if (!triggers || !triggers->t_commit)
+	if (!triggers || !triggers->t_frozen)
 		return;
 
-	triggers->t_commit(triggers, bh, mapped_data, bh->b_size);
+	triggers->t_frozen(triggers, bh, mapped_data, bh->b_size);
 }
 
 void jbd2_buffer_abort_trigger(struct journal_head *jh,
...
@@ -196,14 +196,13 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock,
 			dump_stack();
 			goto bail;
 		}
+	}
 
-		past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
-		mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
-		     (unsigned long long)past_eof);
+	past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
+	mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
+	     (unsigned long long)past_eof);
 
-		if (create && (iblock >= past_eof))
-			set_buffer_new(bh_result);
-	}
+	if (create && (iblock >= past_eof))
+		set_buffer_new(bh_result);
 
 bail:
 	if (err < 0)
@@ -459,36 +458,6 @@ int walk_page_buffers( handle_t *handle,
 	return ret;
 }
 
-handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
-				      struct page *page,
-				      unsigned from,
-				      unsigned to)
-{
-	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
-	handle_t *handle;
-	int ret = 0;
-
-	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
-	if (IS_ERR(handle)) {
-		ret = -ENOMEM;
-		mlog_errno(ret);
-		goto out;
-	}
-
-	if (ocfs2_should_order_data(inode)) {
-		ret = ocfs2_jbd2_file_inode(handle, inode);
-		if (ret < 0)
-			mlog_errno(ret);
-	}
-out:
-	if (ret) {
-		if (!IS_ERR(handle))
-			ocfs2_commit_trans(osb, handle);
-		handle = ERR_PTR(ret);
-	}
-	return handle;
-}
-
 static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
 {
 	sector_t status;
@@ -1131,23 +1100,37 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
  */
 static int ocfs2_grab_pages_for_write(struct address_space *mapping,
 				      struct ocfs2_write_ctxt *wc,
-				      u32 cpos, loff_t user_pos, int new,
+				      u32 cpos, loff_t user_pos,
+				      unsigned user_len, int new,
 				      struct page *mmap_page)
 {
 	int ret = 0, i;
-	unsigned long start, target_index, index;
+	unsigned long start, target_index, end_index, index;
 	struct inode *inode = mapping->host;
+	loff_t last_byte;
 
 	target_index = user_pos >> PAGE_CACHE_SHIFT;
 
 	/*
 	 * Figure out how many pages we'll be manipulating here. For
 	 * non allocating write, we just change the one
-	 * page. Otherwise, we'll need a whole clusters worth.
+	 * page. Otherwise, we'll need a whole clusters worth.  If we're
+	 * writing past i_size, we only need enough pages to cover the
+	 * last page of the write.
 	 */
 	if (new) {
 		wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
 		start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
+		/*
+		 * We need the index *past* the last page we could possibly
+		 * touch.  This is the page past the end of the write or
+		 * i_size, whichever is greater.
+		 */
+		last_byte = max(user_pos + user_len, i_size_read(inode));
+		BUG_ON(last_byte < 1);
+		end_index = ((last_byte - 1) >> PAGE_CACHE_SHIFT) + 1;
+		if ((start + wc->w_num_pages) > end_index)
+			wc->w_num_pages = end_index - start;
 	} else {
 		wc->w_num_pages = 1;
 		start = target_index;
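
For intuition, here is a small standalone sketch of the trim added above. It is not part of the patch; it assumes 4 KB pages (PAGE_CACHE_SHIFT == 12), an 8-page cluster starting at index 0, and made-up write parameters:

    #include <stdio.h>

    int main(void)
    {
        /* A 100-byte write at pos 0 into a file whose i_size is 5000. */
        unsigned long long user_pos = 0, user_len = 100, i_size = 5000;
        unsigned long start = 0, w_num_pages = 8;   /* one 32 KB cluster */
        unsigned long long last_byte;
        unsigned long end_index;

        /* Page past the end of the write or i_size, whichever is greater. */
        last_byte = user_pos + user_len > i_size ? user_pos + user_len : i_size;
        end_index = (unsigned long)((last_byte - 1) >> 12) + 1;

        if (start + w_num_pages > end_index)
            w_num_pages = end_index - start;

        printf("pages to grab: %lu\n", w_num_pages);   /* prints 2, not 8 */
        return 0;
    }

With i_size at 5000, end_index is 2, so only two pages are grabbed instead of the full cluster's eight.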
@@ -1620,21 +1603,20 @@ static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
  * write path can treat it as an non-allocating write, which has no
  * special case code for sparse/nonsparse files.
  */
-static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
-					unsigned len,
+static int ocfs2_expand_nonsparse_inode(struct inode *inode,
+					struct buffer_head *di_bh,
+					loff_t pos, unsigned len,
 					struct ocfs2_write_ctxt *wc)
 {
 	int ret;
-	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	loff_t newsize = pos + len;
 
-	if (ocfs2_sparse_alloc(osb))
-		return 0;
+	BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
 
 	if (newsize <= i_size_read(inode))
 		return 0;
 
-	ret = ocfs2_extend_no_holes(inode, newsize, pos);
+	ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos);
 	if (ret)
 		mlog_errno(ret);
@@ -1644,6 +1626,18 @@ static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
 	return ret;
 }
 
+static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
+			   loff_t pos)
+{
+	int ret = 0;
+
+	BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
+	if (pos > i_size_read(inode))
+		ret = ocfs2_zero_extend(inode, di_bh, pos);
+
+	return ret;
+}
+
 int ocfs2_write_begin_nolock(struct address_space *mapping,
 			     loff_t pos, unsigned len, unsigned flags,
 			     struct page **pagep, void **fsdata,
@@ -1679,7 +1673,11 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
 		}
 	}
 
-	ret = ocfs2_expand_nonsparse_inode(inode, pos, len, wc);
+	if (ocfs2_sparse_alloc(osb))
+		ret = ocfs2_zero_tail(inode, di_bh, pos);
+	else
+		ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos, len,
+						   wc);
 	if (ret) {
 		mlog_errno(ret);
 		goto out;
@@ -1789,7 +1787,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
 	 * that we can zero and flush if we error after adding the
 	 * extent.
 	 */
-	ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos,
+	ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
 					 cluster_of_pages, mmap_page);
 	if (ret) {
 		mlog_errno(ret);
...
@@ -1671,7 +1671,7 @@ struct dlm_ctxt * dlm_register_domain(const char *domain,
 	struct dlm_ctxt *dlm = NULL;
 	struct dlm_ctxt *new_ctxt = NULL;
 
-	if (strlen(domain) > O2NM_MAX_NAME_LEN) {
+	if (strlen(domain) >= O2NM_MAX_NAME_LEN) {
 		ret = -ENAMETOOLONG;
 		mlog(ML_ERROR, "domain name length too long\n");
 		goto leave;
@@ -1709,6 +1709,7 @@ struct dlm_ctxt * dlm_register_domain(const char *domain,
 	}
 
 	if (dlm_protocol_compare(&dlm->fs_locking_proto, fs_proto)) {
+		spin_unlock(&dlm_domain_lock);
 		mlog(ML_ERROR,
 		     "Requested locking protocol version is not "
 		     "compatible with already registered domain "
...
@@ -2808,14 +2808,8 @@ static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
 		mlog(0, "trying again...\n");
 		goto again;
 	}
-	/* now that we are sure the MIGRATING state is there, drop
-	 * the unneded state which blocked threads trying to DIRTY */
-	spin_lock(&res->spinlock);
-	BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
-	BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
-	res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
-	spin_unlock(&res->spinlock);
 
+	ret = 0;
 	/* did the target go down or die? */
 	spin_lock(&dlm->spinlock);
 	if (!test_bit(target, dlm->domain_map)) {
@@ -2825,10 +2819,22 @@ static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
 	}
 	spin_unlock(&dlm->spinlock);
 
+	/*
+	 * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for
+	 * another try; otherwise, we are sure the MIGRATING state is there,
+	 * drop the unneded state which blocked threads trying to DIRTY
+	 */
+	spin_lock(&res->spinlock);
+	BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
+	res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
+	if (!ret)
+		BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
+	spin_unlock(&res->spinlock);
+
 	/*
 	 * at this point:
 	 *
-	 * o the DLM_LOCK_RES_MIGRATING flag is set
+	 * o the DLM_LOCK_RES_MIGRATING flag is set if target not down
 	 * o there are no pending asts on this lockres
 	 * o all processes trying to reserve an ast on this
 	 *   lockres must wait for the MIGRATING flag to clear
...
@@ -463,7 +463,7 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
 	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
 		int bit;
 
-		bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0);
+		bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0);
 		if (bit >= O2NM_MAX_NODES || bit < 0)
 			dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
 		else
...
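
The fix above is a classic off-by-one: find_next_bit(addr, size, offset) scans bit indices in [offset, size), so the size argument must be the number of bits the bitmap actually holds. A minimal sketch of the difference (not from the patch, kernel-style usage only):

    /* recovery_map is declared to hold O2NM_MAX_NODES bits. */
    unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
    int bit;

    /* Correct: scans bits 0 .. O2NM_MAX_NODES - 1. */
    bit = find_next_bit(recovery_map, O2NM_MAX_NODES, 0);

    /* Buggy: also examines bit O2NM_MAX_NODES, past the end of the bitmap. */
    bit = find_next_bit(recovery_map, O2NM_MAX_NODES + 1, 0);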
This diff is collapsed.
@@ -54,8 +54,10 @@ int ocfs2_add_inode_data(struct ocfs2_super *osb,
 int ocfs2_simple_size_update(struct inode *inode,
 			     struct buffer_head *di_bh,
 			     u64 new_i_size);
-int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size,
-			  u64 zero_to);
+int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
+			  u64 new_i_size, u64 zero_to);
+int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
+		      loff_t zero_to);
 int ocfs2_setattr(struct dentry *dentry, struct iattr *attr);
 int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
 		  struct kstat *stat);
...
@@ -472,7 +472,7 @@ static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger
 	return container_of(triggers, struct ocfs2_triggers, ot_triggers);
 }
 
-static void ocfs2_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
+static void ocfs2_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
 				 struct buffer_head *bh,
 				 void *data, size_t size)
 {
@@ -491,7 +491,7 @@ static void ocfs2_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
  * Quota blocks have their own trigger because the struct ocfs2_block_check
  * offset depends on the blocksize.
  */
-static void ocfs2_dq_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
+static void ocfs2_dq_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
 				    struct buffer_head *bh,
 				    void *data, size_t size)
 {
@@ -511,7 +511,7 @@ static void ocfs2_dq_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
  * Directory blocks also have their own trigger because the
  * struct ocfs2_block_check offset depends on the blocksize.
  */
-static void ocfs2_db_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
+static void ocfs2_db_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
 				    struct buffer_head *bh,
 				    void *data, size_t size)
 {
@@ -544,7 +544,7 @@ static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
 static struct ocfs2_triggers di_triggers = {
 	.ot_triggers = {
-		.t_commit = ocfs2_commit_trigger,
+		.t_frozen = ocfs2_frozen_trigger,
 		.t_abort = ocfs2_abort_trigger,
 	},
 	.ot_offset = offsetof(struct ocfs2_dinode, i_check),
@@ -552,7 +552,7 @@ static struct ocfs2_triggers di_triggers = {
 static struct ocfs2_triggers eb_triggers = {
 	.ot_triggers = {
-		.t_commit = ocfs2_commit_trigger,
+		.t_frozen = ocfs2_frozen_trigger,
 		.t_abort = ocfs2_abort_trigger,
 	},
 	.ot_offset = offsetof(struct ocfs2_extent_block, h_check),
@@ -560,7 +560,7 @@ static struct ocfs2_triggers eb_triggers = {
 static struct ocfs2_triggers rb_triggers = {
 	.ot_triggers = {
-		.t_commit = ocfs2_commit_trigger,
+		.t_frozen = ocfs2_frozen_trigger,
 		.t_abort = ocfs2_abort_trigger,
 	},
 	.ot_offset = offsetof(struct ocfs2_refcount_block, rf_check),
@@ -568,7 +568,7 @@ static struct ocfs2_triggers rb_triggers = {
 static struct ocfs2_triggers gd_triggers = {
 	.ot_triggers = {
-		.t_commit = ocfs2_commit_trigger,
+		.t_frozen = ocfs2_frozen_trigger,
 		.t_abort = ocfs2_abort_trigger,
 	},
 	.ot_offset = offsetof(struct ocfs2_group_desc, bg_check),
@@ -576,14 +576,14 @@ static struct ocfs2_triggers gd_triggers = {
 static struct ocfs2_triggers db_triggers = {
 	.ot_triggers = {
-		.t_commit = ocfs2_db_commit_trigger,
+		.t_frozen = ocfs2_db_frozen_trigger,
 		.t_abort = ocfs2_abort_trigger,
 	},
 };
 
 static struct ocfs2_triggers xb_triggers = {
 	.ot_triggers = {
-		.t_commit = ocfs2_commit_trigger,
+		.t_frozen = ocfs2_frozen_trigger,
 		.t_abort = ocfs2_abort_trigger,
 	},
 	.ot_offset = offsetof(struct ocfs2_xattr_block, xb_check),
@@ -591,14 +591,14 @@ static struct ocfs2_triggers xb_triggers = {
 static struct ocfs2_triggers dq_triggers = {
 	.ot_triggers = {
-		.t_commit = ocfs2_dq_commit_trigger,
+		.t_frozen = ocfs2_dq_frozen_trigger,
 		.t_abort = ocfs2_abort_trigger,
 	},
 };
 
 static struct ocfs2_triggers dr_triggers = {
 	.ot_triggers = {
-		.t_commit = ocfs2_commit_trigger,
+		.t_frozen = ocfs2_frozen_trigger,
 		.t_abort = ocfs2_abort_trigger,
 	},
 	.ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check),
@@ -606,7 +606,7 @@ static struct ocfs2_triggers dr_triggers = {
 static struct ocfs2_triggers dl_triggers = {
 	.ot_triggers = {
-		.t_commit = ocfs2_commit_trigger,
+		.t_frozen = ocfs2_frozen_trigger,
 		.t_abort = ocfs2_abort_trigger,
 	},
 	.ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check),
@@ -1936,7 +1936,7 @@ void ocfs2_orphan_scan_work(struct work_struct *work)
 	mutex_lock(&os->os_lock);
 	ocfs2_queue_orphan_scan(osb);
 	if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE)
-		schedule_delayed_work(&os->os_orphan_scan_work,
-				      ocfs2_orphan_scan_timeout());
+		queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work,
+				   ocfs2_orphan_scan_timeout());
 	mutex_unlock(&os->os_lock);
 }
@@ -1976,7 +1976,7 @@ void ocfs2_orphan_scan_start(struct ocfs2_super *osb)
 		atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
 	else {
 		atomic_set(&os->os_state, ORPHAN_SCAN_ACTIVE);
-		schedule_delayed_work(&os->os_orphan_scan_work,
-				      ocfs2_orphan_scan_timeout());
+		queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work,
+				   ocfs2_orphan_scan_timeout());
 	}
 }
...
@@ -118,6 +118,7 @@ unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb)
 {
 	unsigned int la_mb;
 	unsigned int gd_mb;
+	unsigned int la_max_mb;
 	unsigned int megs_per_slot;
 	struct super_block *sb = osb->sb;
 
@@ -182,6 +183,12 @@ unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb)
 	if (megs_per_slot < la_mb)
 		la_mb = megs_per_slot;
 
+	/* We can't store more bits than we can in a block. */
+	la_max_mb = ocfs2_clusters_to_megabytes(osb->sb,
+						ocfs2_local_alloc_size(sb) * 8);
+	if (la_mb > la_max_mb)
+		la_mb = la_max_mb;
+
 	return la_mb;
 }
...
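
To see the scale of the new cap, a rough standalone calculation. The numbers here are assumptions, not taken from ocfs2: suppose ocfs2_local_alloc_size() leaves about 3996 bytes of bitmap space in a 4 KB block, and clusters are 4 KB:

    #include <stdio.h>

    int main(void)
    {
        unsigned int bitmap_bytes = 3996;           /* assumed */
        unsigned int clusters = bitmap_bytes * 8;   /* one bit per cluster */
        /* clusters -> bytes (4 KB clusters) -> megabytes */
        unsigned int la_max_mb =
            (unsigned int)(((unsigned long long)clusters << 12) >> 20);

        printf("local alloc window capped at ~%u MB\n", la_max_mb);
        return 0;
    }

With those assumed numbers the default window can never exceed roughly 124 MB, however large the per-slot or group-descriptor heuristics above come out.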
@@ -775,7 +775,7 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
 		 * locking allocators ranks above a transaction start
 		 */
 		WARN_ON(journal_current_handle());
-		status = ocfs2_extend_no_holes(gqinode,
+		status = ocfs2_extend_no_holes(gqinode, NULL,
 			gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
 			gqinode->i_size);
 		if (status < 0)
...
@@ -971,7 +971,7 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
 	u64 p_blkno;
 
 	/* We are protected by dqio_sem so no locking needed */
-	status = ocfs2_extend_no_holes(lqinode,
+	status = ocfs2_extend_no_holes(lqinode, NULL,
 				       lqinode->i_size + 2 * sb->s_blocksize,
 				       lqinode->i_size);
 	if (status < 0) {
@@ -1114,7 +1114,7 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
 		return ocfs2_local_quota_add_chunk(sb, type, offset);
 	/* We are protected by dqio_sem so no locking needed */
-	status = ocfs2_extend_no_holes(lqinode,
+	status = ocfs2_extend_no_holes(lqinode, NULL,
 				       lqinode->i_size + sb->s_blocksize,
 				       lqinode->i_size);
 	if (status < 0) {
...
@@ -2931,6 +2931,12 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 	offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
 	end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
+	/*
+	 * We only duplicate pages until we reach the page contains i_size - 1.
+	 * So trim 'end' to i_size.
+	 */
+	if (end > i_size_read(context->inode))
+		end = i_size_read(context->inode);
 
 	while (offset < end) {
 		page_index = offset >> PAGE_CACHE_SHIFT;
@@ -4166,6 +4172,12 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
 	struct inode *inode = old_dentry->d_inode;
 	struct buffer_head *new_bh = NULL;
 
+	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
+		ret = -EINVAL;
+		mlog_errno(ret);
+		goto out;
+	}
+
 	ret = filemap_fdatawrite(inode->i_mapping);
 	if (ret) {
 		mlog_errno(ret);
...
@@ -741,7 +741,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
 		     le16_to_cpu(bg->bg_free_bits_count));
 	le32_add_cpu(&cl->cl_recs[alloc_rec].c_total,
 		     le16_to_cpu(bg->bg_bits));
-	cl->cl_recs[alloc_rec].c_blkno = cpu_to_le64(bg->bg_blkno);
+	cl->cl_recs[alloc_rec].c_blkno = bg->bg_blkno;
 	if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
 		le16_add_cpu(&cl->cl_next_free_rec, 1);
...
@@ -709,7 +709,7 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
 				    struct ocfs2_xattr_value_buf *vb,
 				    struct ocfs2_xattr_set_ctxt *ctxt)
 {
-	int status = 0;
+	int status = 0, credits;
 	handle_t *handle = ctxt->handle;
 	enum ocfs2_alloc_restarted why;
 	u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters);
@@ -719,11 +719,12 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
 
 	ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);
 
-	status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
-			       OCFS2_JOURNAL_ACCESS_WRITE);
-	if (status < 0) {
-		mlog_errno(status);
-		goto leave;
-	}
+	while (clusters_to_add) {
+		status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
+				       OCFS2_JOURNAL_ACCESS_WRITE);
+		if (status < 0) {
+			mlog_errno(status);
+			break;
+		}
 
-	prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);
+		prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);
@@ -735,22 +736,37 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
-					    ctxt->data_ac,
-					    ctxt->meta_ac,
-					    &why);
-	if (status < 0) {
-		if (status != -ENOSPC)
-			mlog_errno(status);
-		goto leave;
-	}
-
-	ocfs2_journal_dirty(handle, vb->vb_bh);
-
-	clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) - prev_clusters;
-
-	/*
-	 * We should have already allocated enough space before the transaction,
-	 * so no need to restart.
-	 */
-	BUG_ON(why != RESTART_NONE || clusters_to_add);
-
-leave:
+						     ctxt->data_ac,
+						     ctxt->meta_ac,
+						     &why);
+		if ((status < 0) && (status != -EAGAIN)) {
+			mlog_errno(status);
+			break;
+		}
+
+		ocfs2_journal_dirty(handle, vb->vb_bh);
+
+		clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) -
+					 prev_clusters;
+
+		if (why != RESTART_NONE && clusters_to_add) {
+			/*
+			 * We can only fail in case the alloc file doesn't give
+			 * up enough clusters.
+			 */
+			BUG_ON(why == RESTART_META);
+			mlog(0, "restarting xattr value extension for %u"
+			     " clusters,.\n", clusters_to_add);
+			credits = ocfs2_calc_extend_credits(inode->i_sb,
+							    &vb->vb_xv->xr_list,
+							    clusters_to_add);
+			status = ocfs2_extend_trans(handle, credits);
+			if (status < 0) {
+				status = -ENOMEM;
+				mlog_errno(status);
+				break;
+			}
+		}
+	}
 
 	return status;
 }
@@ -6788,16 +6804,15 @@ static int ocfs2_lock_reflink_xattr_rec_allocators(
 	return ret;
 }
 
-static int ocfs2_reflink_xattr_buckets(handle_t *handle,
+static int ocfs2_reflink_xattr_bucket(handle_t *handle,
 				u64 blkno, u64 new_blkno, u32 clusters,
+				u32 *cpos, int num_buckets,
 				struct ocfs2_alloc_context *meta_ac,
 				struct ocfs2_alloc_context *data_ac,
 				struct ocfs2_reflink_xattr_tree_args *args)
 {
 	int i, j, ret = 0;
 	struct super_block *sb = args->reflink->old_inode->i_sb;
-	u32 bpc = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(sb));
-	u32 num_buckets = clusters * bpc;
 	int bpb = args->old_bucket->bu_blocks;
 	struct ocfs2_xattr_value_buf vb = {
 		.vb_access = ocfs2_journal_access,
@@ -6816,14 +6831,6 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
 			break;
 		}
 
-		/*
-		 * The real bucket num in this series of blocks is stored
-		 * in the 1st bucket.
-		 */
-		if (i == 0)
-			num_buckets = le16_to_cpu(
-				bucket_xh(args->old_bucket)->xh_num_buckets);
-
 		ret = ocfs2_xattr_bucket_journal_access(handle,
 						args->new_bucket,
 						OCFS2_JOURNAL_ACCESS_CREATE);
@@ -6837,6 +6844,18 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
 			       bucket_block(args->old_bucket, j),
 			       sb->s_blocksize);
 
+		/*
+		 * Record the start cpos so that we can use it to initialize
+		 * our xattr tree we also set the xh_num_bucket for the new
+		 * bucket.
+		 */
+		if (i == 0) {
+			*cpos = le32_to_cpu(bucket_xh(args->new_bucket)->
+					    xh_entries[0].xe_name_hash);
+			bucket_xh(args->new_bucket)->xh_num_buckets =
+				cpu_to_le16(num_buckets);
+		}
+
 		ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
 
 		ret = ocfs2_reflink_xattr_header(handle, args->reflink,
@@ -6866,6 +6885,7 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
 	}
 
 	ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
+
 	ocfs2_xattr_bucket_relse(args->old_bucket);
 	ocfs2_xattr_bucket_relse(args->new_bucket);
 }
@@ -6874,6 +6894,75 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
 	ocfs2_xattr_bucket_relse(args->new_bucket);
 	return ret;
 }
+
+static int ocfs2_reflink_xattr_buckets(handle_t *handle,
+				struct inode *inode,
+				struct ocfs2_reflink_xattr_tree_args *args,
+				struct ocfs2_extent_tree *et,
+				struct ocfs2_alloc_context *meta_ac,
+				struct ocfs2_alloc_context *data_ac,
+				u64 blkno, u32 cpos, u32 len)
+{
+	int ret, first_inserted = 0;
+	u32 p_cluster, num_clusters, reflink_cpos = 0;
+	u64 new_blkno;
+	unsigned int num_buckets, reflink_buckets;
+	unsigned int bpc =
+		ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb));
+
+	ret = ocfs2_read_xattr_bucket(args->old_bucket, blkno);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+	num_buckets = le16_to_cpu(bucket_xh(args->old_bucket)->xh_num_buckets);
+	ocfs2_xattr_bucket_relse(args->old_bucket);
+
+	while (len && num_buckets) {
+		ret = ocfs2_claim_clusters(handle, data_ac,
+					   1, &p_cluster, &num_clusters);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
+
+		new_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
+		reflink_buckets = min(num_buckets, bpc * num_clusters);
+
+		ret = ocfs2_reflink_xattr_bucket(handle, blkno,
+						 new_blkno, num_clusters,
+						 &reflink_cpos, reflink_buckets,
+						 meta_ac, data_ac, args);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
+
+		/*
+		 * For the 1st allocated cluster, we make it use the same cpos
+		 * so that the xattr tree looks the same as the original one
+		 * in the most case.
+		 */
+		if (!first_inserted) {
+			reflink_cpos = cpos;
+			first_inserted = 1;
+		}
+		ret = ocfs2_insert_extent(handle, et, reflink_cpos, new_blkno,
+					  num_clusters, 0, meta_ac);
+		if (ret)
+			mlog_errno(ret);
+
+		mlog(0, "insert new xattr extent rec start %llu len %u to %u\n",
+		     (unsigned long long)new_blkno, num_clusters, reflink_cpos);
+
+		len -= num_clusters;
+		blkno += ocfs2_clusters_to_blocks(inode->i_sb, num_clusters);
+		num_buckets -= reflink_buckets;
+	}
+out:
+	return ret;
+}
 
 /*
  * Create the same xattr extent record in the new inode's xattr tree.
 */
@@ -6885,8 +6974,6 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode,
 			       void *para)
 {
 	int ret, credits = 0;
-	u32 p_cluster, num_clusters;
-	u64 new_blkno;
 	handle_t *handle;
 	struct ocfs2_reflink_xattr_tree_args *args =
 		(struct ocfs2_reflink_xattr_tree_args *)para;
@@ -6895,6 +6982,9 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode,
 	struct ocfs2_alloc_context *data_ac = NULL;
 	struct ocfs2_extent_tree et;
 
+	mlog(0, "reflink xattr buckets %llu len %u\n",
+	     (unsigned long long)blkno, len);
+
 	ocfs2_init_xattr_tree_extent_tree(&et,
 					  INODE_CACHE(args->reflink->new_inode),
 					  args->new_blk_bh);
@@ -6914,32 +7004,12 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode,
 		goto out;
 	}
 
-	ret = ocfs2_claim_clusters(handle, data_ac,
-				   len, &p_cluster, &num_clusters);
-	if (ret) {
-		mlog_errno(ret);
-		goto out_commit;
-	}
-
-	new_blkno = ocfs2_clusters_to_blocks(osb->sb, p_cluster);
-
-	mlog(0, "reflink xattr buckets %llu to %llu, len %u\n",
-	     (unsigned long long)blkno, (unsigned long long)new_blkno, len);
-	ret = ocfs2_reflink_xattr_buckets(handle, blkno, new_blkno, len,
-					  meta_ac, data_ac, args);
-	if (ret) {
-		mlog_errno(ret);
-		goto out_commit;
-	}
-
-	mlog(0, "insert new xattr extent rec start %llu len %u to %u\n",
-	     (unsigned long long)new_blkno, len, cpos);
-
-	ret = ocfs2_insert_extent(handle, &et, cpos, new_blkno,
-				  len, 0, meta_ac);
+	ret = ocfs2_reflink_xattr_buckets(handle, inode, args, &et,
+					  meta_ac, data_ac,
+					  blkno, cpos, len);
 	if (ret)
 		mlog_errno(ret);
 
-out_commit:
 	ocfs2_commit_trans(osb, handle);
 
 out:
...
@@ -1026,11 +1026,12 @@ void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
 struct jbd2_buffer_trigger_type {
 	/*
-	 * Fired just before a buffer is written to the journal.
-	 * mapped_data is a mapped buffer that is the frozen data for
-	 * commit.
+	 * Fired a the moment data to write to the journal are known to be
+	 * stable - so either at the moment b_frozen_data is created or just
+	 * before a buffer is written to the journal. mapped_data is a mapped
+	 * buffer that is the frozen data for commit.
 	 */
-	void (*t_commit)(struct jbd2_buffer_trigger_type *type,
+	void (*t_frozen)(struct jbd2_buffer_trigger_type *type,
 			 struct buffer_head *bh, void *mapped_data,
 			 size_t size);
@@ -1042,7 +1043,7 @@ struct jbd2_buffer_trigger_type {
 			 struct buffer_head *bh);
 };
 
-extern void jbd2_buffer_commit_trigger(struct journal_head *jh,
+extern void jbd2_buffer_frozen_trigger(struct journal_head *jh,
 				       void *mapped_data,
 				       struct jbd2_buffer_trigger_type *triggers);
 extern void jbd2_buffer_abort_trigger(struct journal_head *jh,
...
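
For client filesystems, the renamed hook is wired up the same way as before; only the callback slot and its firing point changed. A minimal sketch of a user of this API (hypothetical names: the trigger type, the t_frozen slot, and jbd2_journal_set_triggers() all appear in the diffs above, but my_recompute_csum() is made up):

    static void my_frozen_trigger(struct jbd2_buffer_trigger_type *type,
                                  struct buffer_head *bh, void *mapped_data,
                                  size_t size)
    {
        /* Data is now stable; recompute the on-disk block checksum. */
        my_recompute_csum(mapped_data, size);
    }

    static struct jbd2_buffer_trigger_type my_triggers = {
        .t_frozen = my_frozen_trigger,
        /* .t_abort may also be set, as in the ocfs2 tables above. */
    };

    /* After jbd2_journal_get_write_access() on bh: */
    jbd2_journal_set_triggers(bh, &my_triggers);

Because t_frozen now also fires when b_frozen_data is created in do_get_write_access(), a checksum computed in the trigger covers the copy that is actually committed, even when the buffer is modified again in a later transaction.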