Commit aa641e8a authored by Stephen Lord

Merge ssh://lord@kernel.bkbits.net/xfs-2.6

into penguin.americas.sgi.com:/src/lord/bitkeeper/xfs-2.6
parents 7d7bb19c c910b5d0
@@ -461,7 +461,8 @@ map_unwritten(
struct page *page;
tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-tloff = min(tlast, start_page->index + pb->pb_page_count - 1);
+tloff = (mp->pbm_offset + mp->pbm_bsize) >> PAGE_CACHE_SHIFT;
+tloff = min(tlast, tloff);
for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
page = probe_unwritten_page(mapping, tindex, mp, pb,
PAGE_CACHE_SIZE, &bs, bbits);
@@ -1041,6 +1042,8 @@ count_page_state(
do {
if (buffer_uptodate(bh) && !buffer_mapped(bh))
(*unmapped) = 1;
+else if (buffer_unwritten(bh) && !buffer_delay(bh))
+clear_buffer_unwritten(bh);
else if (buffer_unwritten(bh))
(*unwritten) = 1;
else if (buffer_delay(bh))
......
@@ -218,7 +218,7 @@ xfs_read(
int error;
vrwlock_t locktype = VRWLOCK_READ;
-error = XFS_SEND_DATA(mp, DM_EVENT_READ, bdp, *offset, size,
+error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp), *offset, size,
FILP_DELAY_FLAG(file), &locktype);
if (error) {
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
@@ -278,7 +278,7 @@ xfs_sendfile(
vrwlock_t locktype = VRWLOCK_READ;
int error;
-error = XFS_SEND_DATA(mp, DM_EVENT_READ, bdp, *offset, count,
+error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp), *offset, count,
FILP_DELAY_FLAG(filp), &locktype);
if (error) {
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
@@ -612,7 +612,7 @@ xfs_write(
loff_t savedsize = *offset;
xfs_iunlock(xip, XFS_ILOCK_EXCL);
-error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, bdp,
+error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,
*offset, size,
FILP_DELAY_FLAG(file), &locktype);
if (error) {
......
@@ -33,7 +33,7 @@
#include "xfs.h"
#include <linux/proc_fs.h>
-struct xfsstats xfsstats;
+DEFINE_PER_CPU(struct xfsstats, xfsstats);
STATIC int
xfs_read_xfsstats(
@@ -44,7 +44,11 @@ xfs_read_xfsstats(
int *eof,
void *data)
{
-int i, j, len;
+int c, i, j, len, val;
+__uint64_t xs_xstrat_bytes = 0;
+__uint64_t xs_write_bytes = 0;
+__uint64_t xs_read_bytes = 0;
static struct xstats_entry {
char *desc;
int endpoint;
@@ -65,21 +69,32 @@ xfs_read_xfsstats(
{ "vnodes", XFSSTAT_END_VNODE_OPS },
};
/* Loop over all stats groups */
for (i=j=len = 0; i < sizeof(xstats)/sizeof(struct xstats_entry); i++) {
len += sprintf(buffer + len, xstats[i].desc);
/* inner loop does each group */
while (j < xstats[i].endpoint) {
-len += sprintf(buffer + len, " %u",
-*(((__u32*)&xfsstats) + j));
+val = 0;
+/* sum over all cpus */
+for (c = 0; c < NR_CPUS; c++) {
+if (!cpu_possible(c)) continue;
+val += *(((__u32*)&per_cpu(xfsstats, c) + j));
+}
+len += sprintf(buffer + len, " %u", val);
j++;
}
buffer[len++] = '\n';
}
+/* extra precision counters */
+for (i = 0; i < NR_CPUS; i++) {
+if (!cpu_possible(i)) continue;
+xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes;
+xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes;
+xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes;
+}
len += sprintf(buffer + len, "xpc %Lu %Lu %Lu\n",
-xfsstats.xs_xstrat_bytes,
-xfsstats.xs_write_bytes,
-xfsstats.xs_read_bytes);
+xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
len += sprintf(buffer + len, "debug %u\n",
#if defined(XFSDEBUG)
1);
......
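The hunks above convert the single global xfsstats structure into one copy per CPU: each writer bumps only its own copy, and the /proc read side folds all copies together at report time, skipping CPU numbers that can never come online (the cpu_possible() test). A minimal userspace sketch of that read-side aggregation, with a plain array standing in for DEFINE_PER_CPU()/per_cpu(); names below are illustrative, not from the patch:

    #include <stdio.h>

    #define NCPUS 4                         /* stands in for NR_CPUS */

    struct stats { unsigned int reads, writes; };

    /* one private copy per CPU, as DEFINE_PER_CPU(struct xfsstats, xfsstats) provides */
    static struct stats percpu_stats[NCPUS];

    /* writer side: bump only the local copy -- no lock, no shared cache line */
    static void inc_reads(int cpu) { percpu_stats[cpu].reads++; }

    /* reader side: fold every copy together, as xfs_read_xfsstats() now does */
    static unsigned int sum_reads(void)
    {
        unsigned int val = 0;
        for (int c = 0; c < NCPUS; c++)
            val += percpu_stats[c].reads;
        return val;
    }

    int main(void)
    {
        inc_reads(0);
        inc_reads(2);
        printf("reads: %u\n", sum_reads());  /* prints "reads: 2" */
        return 0;
    }

The tradeoff is that reads become O(NR_CPUS) and momentarily inconsistent across counters, which is acceptable for statistics output.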
@@ -35,6 +35,8 @@
#if defined(CONFIG_PROC_FS) && !defined(XFS_STATS_OFF)
+#include <linux/percpu.h>
/*
* XFS global statistics
*/
@@ -126,11 +128,13 @@ struct xfsstats {
__uint64_t xs_read_bytes;
};
-extern struct xfsstats xfsstats;
+DECLARE_PER_CPU(struct xfsstats, xfsstats);
-# define XFS_STATS_INC(count) ( xfsstats.count++ )
-# define XFS_STATS_DEC(count) ( xfsstats.count-- )
-# define XFS_STATS_ADD(count, inc) ( xfsstats.count += (inc) )
+/* We don't disable preempt, not too worried about poking the
+ * wrong cpu's stat for now */
+#define XFS_STATS_INC(count) (__get_cpu_var(xfsstats).count++)
+#define XFS_STATS_DEC(count) (__get_cpu_var(xfsstats).count--)
+#define XFS_STATS_ADD(count, inc) (__get_cpu_var(xfsstats).count += (inc))
extern void xfs_init_procfs(void);
extern void xfs_cleanup_procfs(void);
......
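The comment in the hunk above records a deliberate tradeoff: __get_cpu_var() is used without disabling preemption, so a task that migrates mid-update can bump another CPU's counter. For statistics this is harmless, since the read side sums every CPU's copy anyway; at worst one non-atomic increment races and a single count is misattributed or lost. For contrast, here is a sketch of the strict form, in which get_cpu_var() pins the task by disabling preemption until put_cpu_var(); this variant is not part of the patch:

    /* strict variant (not in the patch): the task cannot migrate
     * between fetching its per-cpu pointer and doing the increment */
    #define XFS_STATS_INC_STRICT(count)          \
        do {                                     \
            get_cpu_var(xfsstats).count++;       \
            put_cpu_var(xfsstats);               \
        } while (0)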
@@ -48,17 +48,23 @@ xfs_stats_clear_proc_handler(
void *buffer,
size_t *lenp)
{
-int ret, *valp = ctl->data;
+int c, ret, *valp = ctl->data;
__uint32_t vn_active;
ret = proc_doulongvec_minmax(ctl, write, filp, buffer, lenp);
if (!ret && write && *valp) {
printk("XFS Clearing xfsstats\n");
+for (c = 0; c < NR_CPUS; c++) {
+if (!cpu_possible(c)) continue;
+preempt_disable();
/* save vn_active, it's a universal truth! */
-vn_active = xfsstats.vn_active;
-memset(&xfsstats, 0, sizeof(xfsstats));
-xfsstats.vn_active = vn_active;
+vn_active = per_cpu(xfsstats, c).vn_active;
+memset(&per_cpu(xfsstats, c), 0,
+sizeof(struct xfsstats));
+per_cpu(xfsstats, c).vn_active = vn_active;
+preempt_enable();
+}
xfs_stats_clear = 0;
}
......
@@ -200,7 +200,7 @@ vn_revalidate(
vn_trace_entry(vp, "vn_revalidate", (inst_t *)__return_address);
ASSERT(vp->v_fbhv != NULL);
-va.va_mask = XFS_AT_STAT|XFS_AT_GENCOUNT;
+va.va_mask = XFS_AT_STAT|XFS_AT_XFLAGS;
VOP_GETATTR(vp, &va, 0, NULL, error);
if (!error) {
inode = LINVFS_GET_IP(vp);
......
@@ -141,7 +141,7 @@ pagebuf_param_t pb_params = {
* Pagebuf statistics variables
*/
-struct pbstats pbstats;
+DEFINE_PER_CPU(struct pbstats, pbstats);
/*
* Pagebuf allocation / freeing.
@@ -293,7 +293,7 @@ _pagebuf_initialize(
atomic_set(&pb->pb_pin_count, 0);
init_waitqueue_head(&pb->pb_waiters);
-PB_STATS_INC(pbstats.pb_create);
+PB_STATS_INC(pb_create);
PB_TRACE(pb, PB_TRACE_REC(get), target);
}
@@ -485,7 +485,7 @@ _pagebuf_lookup_pages(
page = find_or_create_page(aspace, index, gfp_mask);
if (!page) {
if (--retry_count > 0) {
-PB_STATS_INC(pbstats.pb_page_retries);
+PB_STATS_INC(pb_page_retries);
pagebuf_daemon_wakeup(1);
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(10);
@@ -495,7 +495,7 @@ _pagebuf_lookup_pages(
all_mapped = 0;
continue;
}
-PB_STATS_INC(pbstats.pb_page_found);
+PB_STATS_INC(pb_page_found);
mark_page_accessed(page);
pb->pb_pages[pi] = page;
} else {
@@ -645,7 +645,7 @@ _pagebuf_find( /* find buffer for block */
h->pb_count++;
list_add(&new_pb->pb_hash_list, &h->pb_hash);
} else {
-PB_STATS_INC(pbstats.pb_miss_locked);
+PB_STATS_INC(pb_miss_locked);
}
spin_unlock(&h->pb_hash_lock);
@@ -665,7 +665,7 @@ _pagebuf_find( /* find buffer for block */
/* wait for buffer ownership */
PB_TRACE(pb, PB_TRACE_REC(get_lk), 0);
pagebuf_lock(pb);
-PB_STATS_INC(pbstats.pb_get_locked_waited);
+PB_STATS_INC(pb_get_locked_waited);
} else {
/* We asked for a trylock and failed, no need
* to look at file offset and length here, we
@@ -675,7 +675,7 @@ _pagebuf_find( /* find buffer for block */
*/
pagebuf_rele(pb);
-PB_STATS_INC(pbstats.pb_busy_locked);
+PB_STATS_INC(pb_busy_locked);
return (NULL);
}
} else {
@@ -691,7 +691,7 @@ _pagebuf_find( /* find buffer for block */
_PBF_ADDR_ALLOCATED | \
_PBF_MEM_ALLOCATED;
PB_TRACE(pb, PB_TRACE_REC(got_lk), 0);
-PB_STATS_INC(pbstats.pb_get_locked);
+PB_STATS_INC(pb_get_locked);
return (pb);
}
@@ -747,7 +747,7 @@ pagebuf_get( /* allocate a buffer */
return (NULL);
}
-PB_STATS_INC(pbstats.pb_get);
+PB_STATS_INC(pb_get);
/* fill in any missing pages */
error = _pagebuf_lookup_pages(pb, pb->pb_target->pbr_mapping, flags);
@@ -766,7 +766,7 @@ pagebuf_get( /* allocate a buffer */
if (flags & PBF_READ) {
if (PBF_NOT_DONE(pb)) {
PB_TRACE(pb, PB_TRACE_REC(get_read), flags);
-PB_STATS_INC(pbstats.pb_get_read);
+PB_STATS_INC(pb_get_read);
pagebuf_iostart(pb, flags);
} else if (flags & PBF_ASYNC) {
/*
@@ -1677,6 +1677,9 @@ pagebuf_daemon(
break;
}
+pb->pb_flags &= ~PBF_DELWRI;
+pb->pb_flags |= PBF_WRITE;
list_del(&pb->pb_list);
list_add(&pb->pb_list, &tmp);
@@ -1688,8 +1691,6 @@ pagebuf_daemon(
while (!list_empty(&tmp)) {
pb = list_entry(tmp.next, page_buf_t, pb_list);
list_del_init(&pb->pb_list);
-pb->pb_flags &= ~PBF_DELWRI;
-pb->pb_flags |= PBF_WRITE;
pagebuf_iostrategy(pb);
}
@@ -1720,6 +1721,7 @@ pagebuf_delwri_flush(
int flush_cnt = 0;
pagebuf_runall_queues(pagebuf_dataio_workqueue);
+pagebuf_runall_queues(pagebuf_logio_workqueue);
spin_lock(&pbd_delwrite_lock);
INIT_LIST_HEAD(&tmp);
@@ -1742,47 +1744,32 @@ pagebuf_delwri_flush(
continue;
}
-if (flags & PBDF_TRYLOCK) {
-if (!pagebuf_cond_lock(pb)) {
-pincount++;
-continue;
-}
-}
-list_del_init(&pb->pb_list);
-if (flags & PBDF_WAIT) {
-list_add(&pb->pb_list, &tmp);
-pb->pb_flags &= ~PBF_ASYNC;
+pb->pb_flags &= ~PBF_DELWRI;
+pb->pb_flags |= PBF_WRITE;
+list_move(&pb->pb_list, &tmp);
}
+/* ok found all the items that can be worked on
+ * drop the lock and process the private list */
spin_unlock(&pbd_delwrite_lock);
-if ((flags & PBDF_TRYLOCK) == 0) {
-pagebuf_lock(pb);
-}
+list_for_each_safe(curr, next, &tmp) {
+pb = list_entry(curr, page_buf_t, pb_list);
-pb->pb_flags &= ~PBF_DELWRI;
-pb->pb_flags |= PBF_WRITE;
+if (flags & PBDF_WAIT)
+pb->pb_flags &= ~PBF_ASYNC;
+else
+list_del_init(curr);
+pagebuf_lock(pb);
pagebuf_iostrategy(pb);
if (++flush_cnt > 32) {
blk_run_queues();
flush_cnt = 0;
}
-spin_lock(&pbd_delwrite_lock);
}
-spin_unlock(&pbd_delwrite_lock);
blk_run_queues();
-if (pinptr)
-*pinptr = pincount;
-if ((flags & PBDF_WAIT) == 0)
-return;
while (!list_empty(&tmp)) {
pb = list_entry(tmp.next, page_buf_t, pb_list);
@@ -1792,6 +1779,9 @@ pagebuf_delwri_flush(
pagebuf_unlock(pb);
pagebuf_rele(pb);
}
+if (pinptr)
+*pinptr = pincount;
}
STATIC int
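The reworked pagebuf_delwri_flush() above follows the splice-then-process idiom: while pbd_delwrite_lock is held, the loop only adjusts flags and list_move()s eligible buffers onto a private list; the lock is then dropped once, and the work that can block (pagebuf_lock() and pagebuf_iostrategy()) runs off the private list, replacing the old per-buffer unlock/relock dance. A minimal userspace sketch of the idiom, with a pthread mutex standing in for the spinlock and illustrative names not taken from the patch:

    #include <pthread.h>
    #include <stdio.h>

    struct buf { int id; struct buf *next; };

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct buf *delwri_queue;    /* shared list, protected by queue_lock */

    static void flush_delwri(void)
    {
        struct buf *tmp, *b;

        /* phase 1: under the lock, only manipulate list pointers */
        pthread_mutex_lock(&queue_lock);
        tmp = delwri_queue;             /* splice the whole queue to a private list */
        delwri_queue = NULL;
        pthread_mutex_unlock(&queue_lock);

        /* phase 2: lock dropped; blocking work proceeds without contention */
        for (b = tmp; b != NULL; b = b->next)
            printf("writing buffer %d\n", b->id);  /* stands in for pagebuf_iostrategy() */
    }

    int main(void)
    {
        static struct buf b1 = { 1, NULL }, b2 = { 2, &b1 };
        delwri_queue = &b2;             /* queue holds buffers 2 -> 1 */
        flush_delwri();
        return 0;
    }

The kernel version is more selective (pinned buffers stay on the queue and are counted in pincount), but the locking shape is the same: a single critical section that touches only list pointers.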
@@ -1846,14 +1836,18 @@ pb_stats_clear_handler(
void *buffer,
size_t *lenp)
{
-int ret;
+int c, ret;
int *valp = ctl->data;
ret = proc_doulongvec_minmax(ctl, write, filp, buffer, lenp);
if (!ret && write && *valp) {
printk("XFS Clearing pbstats\n");
-memset(&pbstats, 0, sizeof(pbstats));
+for (c = 0; c < NR_CPUS; c++) {
+if (!cpu_possible(c)) continue;
+memset(&per_cpu(pbstats, c), 0,
+sizeof(struct pbstats));
+}
pb_params.stats_clear.val = 0;
}
@@ -1907,13 +1901,17 @@ pagebuf_readstats(
int *eof,
void *data)
{
-int i, len;
+int c, i, len, val;
len = 0;
len += sprintf(buffer + len, "pagebuf");
-for (i = 0; i < sizeof(pbstats) / sizeof(u_int32_t); i++) {
-len += sprintf(buffer + len, " %u",
-*(((u_int32_t*)&pbstats) + i));
+for (i = 0; i < sizeof(struct pbstats) / sizeof(u_int32_t); i++) {
+val = 0;
+for (c = 0 ; c < NR_CPUS; c++) {
+if (!cpu_possible(c)) continue;
+val += *(((u_int32_t*)&per_cpu(pbstats, c) + i));
+}
+len += sprintf(buffer + len, " %u", val);
}
buffer[len++] = '\n';
......
@@ -368,7 +368,6 @@ extern int pagebuf_ispin( /* check if buffer is pinned */
/* Delayed Write Buffer Routines */
#define PBDF_WAIT 0x01
#define PBDF_TRYLOCK 0x02
extern void pagebuf_delwri_flush(
pb_target_t *,
unsigned long,
......
@@ -37,6 +37,7 @@
#ifndef __PAGE_BUF_PRIVATE_H__
#define __PAGE_BUF_PRIVATE_H__
+#include <linux/percpu.h>
#include "page_buf.h"
#define _PAGE_BUF_INTERNAL_
@@ -120,9 +121,11 @@ struct pbstats {
u_int32_t pb_get_read;
};
-extern struct pbstats pbstats;
+DECLARE_PER_CPU(struct pbstats, pbstats);
-#define PB_STATS_INC(count) ( count ++ )
+/* We don't disable preempt, not too worried about poking the
+ * wrong cpu's stat for now */
+#define PB_STATS_INC(count) (__get_cpu_var(pbstats).count++)
#ifndef STATIC
# define STATIC static
......
@@ -5553,7 +5553,7 @@ xfs_getbmap(
&& DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ)
&& whichfork == XFS_DATA_FORK) {
-error = XFS_SEND_DATA(mp, DM_EVENT_READ, bdp, 0, 0, 0, NULL);
+error = XFS_SEND_DATA(mp, DM_EVENT_READ, vp, 0, 0, 0, NULL);
if (error)
return XFS_ERROR(error);
}
......
@@ -3938,7 +3938,7 @@ xlog_recover_finish(
xlog_recover_check_summary(log);
cmn_err(CE_NOTE,
"Ending XFS recovery on filesystem: %s (dev: %d/%d)",
"Ending XFS recovery on filesystem: %s (dev: %s)",
log->l_mp->m_fsname, XFS_BUFTARG_NAME(log->l_targ));
log->l_flags &= ~XLOG_RECOVERY_NEEDED;
} else {
......
@@ -91,7 +91,7 @@ struct xfs_bmap_free;
* Prototypes and functions for the Data Migration subsystem.
*/
-typedef int (*xfs_send_data_t)(int, struct bhv_desc *,
+typedef int (*xfs_send_data_t)(int, struct vnode *,
xfs_off_t, size_t, int, vrwlock_t *);
typedef int (*xfs_send_mmap_t)(struct vm_area_struct *, uint);
typedef int (*xfs_send_destroy_t)(struct vnode *, dm_right_t);
@@ -109,8 +109,8 @@ typedef struct xfs_dmops {
xfs_send_unmount_t xfs_send_unmount;
} xfs_dmops_t;
-#define XFS_SEND_DATA(mp, ev,bdp,off,len,fl,lock) \
-(*(mp)->m_dm_ops.xfs_send_data)(ev,bdp,off,len,fl,lock)
+#define XFS_SEND_DATA(mp, ev,vp,off,len,fl,lock) \
+(*(mp)->m_dm_ops.xfs_send_data)(ev,vp,off,len,fl,lock)
#define XFS_SEND_MMAP(mp, vma,fl) \
(*(mp)->m_dm_ops.xfs_send_mmap)(vma,fl)
#define XFS_SEND_DESTROY(mp, vp,right) \
......
@@ -83,7 +83,7 @@ typedef __uint64_t __psunsigned_t;
* XFS_BIG_INUMS needs the VFS inode number to be 64 bits, as well
* as requiring XFS_BIG_BLKNOS to be set.
*/
-#if defined(CONFIG_LBD) || (defined(HAVE_SECTOR_T) && (BITS_PER_LONG == 64))
+#if defined(CONFIG_LBD) || (BITS_PER_LONG == 64)
# define XFS_BIG_BLKNOS 1
# if BITS_PER_LONG == 64
# define XFS_BIG_INUMS 1
......
@@ -144,11 +144,9 @@ xfs_getattr(
xfs_ilock(ip, XFS_ILOCK_SHARED);
vap->va_size = ip->i_d.di_size;
-if (vap->va_mask == XFS_AT_SIZE) {
-if (!(flags & ATTR_LAZY))
-xfs_iunlock(ip, XFS_ILOCK_SHARED);
-return 0;
-}
+if (vap->va_mask == XFS_AT_SIZE)
+goto all_done;
vap->va_nblocks =
XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
vap->va_nodeid = ip->i_ino;
@@ -162,11 +160,8 @@ xfs_getattr(
*/
if ((vap->va_mask &
~(XFS_AT_SIZE|XFS_AT_FSID|XFS_AT_NODEID|
-XFS_AT_NLINK|XFS_AT_BLKSIZE)) == 0) {
-if (!(flags & ATTR_LAZY))
-xfs_iunlock(ip, XFS_ILOCK_SHARED);
-return 0;
-}
+XFS_AT_NLINK|XFS_AT_BLKSIZE)) == 0)
+goto all_done;
/*
* Copy from in-core inode.
@@ -250,37 +245,44 @@ xfs_getattr(
*/
if ((vap->va_mask &
(XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|
-XFS_AT_GENCOUNT|XFS_AT_VCODE)) == 0) {
-if (!(flags & ATTR_LAZY))
-xfs_iunlock(ip, XFS_ILOCK_SHARED);
-return 0;
-}
+XFS_AT_GENCOUNT|XFS_AT_VCODE)) == 0)
+goto all_done;
/*
* convert di_flags to xflags
*/
-vap->va_xflags =
-((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
-XFS_XFLAG_REALTIME : 0) |
-((ip->i_d.di_flags & XFS_DIFLAG_PREALLOC) ?
-XFS_XFLAG_PREALLOC : 0) |
-((ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE) ?
-XFS_XFLAG_IMMUTABLE : 0) |
-((ip->i_d.di_flags & XFS_DIFLAG_APPEND) ?
-XFS_XFLAG_APPEND : 0) |
-((ip->i_d.di_flags & XFS_DIFLAG_SYNC) ?
-XFS_XFLAG_SYNC : 0) |
-((ip->i_d.di_flags & XFS_DIFLAG_NOATIME) ?
-XFS_XFLAG_NOATIME : 0) |
-((ip->i_d.di_flags & XFS_DIFLAG_NODUMP) ?
-XFS_XFLAG_NODUMP: 0) |
-(XFS_IFORK_Q(ip) ?
-XFS_XFLAG_HASATTR : 0);
+vap->va_xflags = 0;
+if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)
+vap->va_xflags |= XFS_XFLAG_REALTIME;
+if (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC)
+vap->va_xflags |= XFS_XFLAG_PREALLOC;
+if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
+vap->va_xflags |= XFS_XFLAG_IMMUTABLE;
+if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
+vap->va_xflags |= XFS_XFLAG_APPEND;
+if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
+vap->va_xflags |= XFS_XFLAG_SYNC;
+if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
+vap->va_xflags |= XFS_XFLAG_NOATIME;
+if (ip->i_d.di_flags & XFS_DIFLAG_NODUMP)
+vap->va_xflags |= XFS_XFLAG_NODUMP;
+if (XFS_IFORK_Q(ip))
+vap->va_xflags |= XFS_XFLAG_HASATTR;
+/*
+ * Exit for inode revalidate. See if any of the rest of
+ * the fields to be filled in are needed.
+ */
+if ((vap->va_mask &
+(XFS_AT_EXTSIZE|XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|
+XFS_AT_GENCOUNT|XFS_AT_VCODE)) == 0)
+goto all_done;
vap->va_extsize = ip->i_d.di_extsize << mp->m_sb.sb_blocklog;
vap->va_nextents =
(ip->i_df.if_flags & XFS_IFEXTENTS) ?
ip->i_df.if_bytes / sizeof(xfs_bmbt_rec_t) :
ip->i_d.di_nextents;
-if (ip->i_afp != NULL)
+if (ip->i_afp)
vap->va_anextents =
(ip->i_afp->if_flags & XFS_IFEXTENTS) ?
ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) :
@@ -290,6 +292,7 @@ xfs_getattr(
vap->va_gencount = ip->i_d.di_gen;
vap->va_vcode = 0L;
+all_done:
if (!(flags & ATTR_LAZY))
xfs_iunlock(ip, XFS_ILOCK_SHARED);
return 0;
@@ -414,7 +417,7 @@ xfs_setattr(
} else {
if (DM_EVENT_ENABLED (vp->v_vfsp, ip, DM_EVENT_TRUNCATE) &&
!(flags & ATTR_DMI)) {
-code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, bdp,
+code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, vp,
vap->va_size, 0, AT_DELAY_FLAG(flags), NULL);
if (code) {
lock_flags = 0;
@@ -4162,7 +4165,7 @@ xfs_alloc_file_space(
end_dmi_offset = offset+len;
if (end_dmi_offset > ip->i_d.di_size)
end_dmi_offset = ip->i_d.di_size;
-error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, XFS_ITOBHV(ip),
+error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, XFS_ITOV(ip),
offset, end_dmi_offset - offset,
0, NULL);
if (error)
@@ -4409,7 +4412,7 @@ xfs_free_file_space(
DM_EVENT_ENABLED(XFS_MTOVFS(mp), ip, DM_EVENT_WRITE)) {
if (end_dmi_offset > ip->i_d.di_size)
end_dmi_offset = ip->i_d.di_size;
-error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, XFS_ITOBHV(ip),
+error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, XFS_ITOV(ip),
offset, end_dmi_offset - offset,
AT_DELAY_FLAG(attr_flags), NULL);
if (error)
......