[XFS] XFS should not be looking at filp reference counts

A check for file_count() is always a bad idea.  Linux has the ->release
method to deal with cleanups on last close, and ->flush is only for the
very rare case where we want to perform an operation on every drop of a
reference to a file struct.

This patch gets rid of vop_close and surrounding code in favour of simply
doing the page flushing from ->release.

SGI-PV: 966562
SGI-Modid: xfs-linux-melb:xfs-kern:28952a

Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
Commit: fbf3ce8d8e (parent 16a087d8e1)
Author: Christoph Hellwig, 2007-06-28 16:46:47 +10:00; committed by Tim Shimmin
3 changed files with 16 additions and 47 deletions
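To illustrate why a file_count() check at ->flush time is unreliable: dup(2), fork(2), or transient references taken inside the kernel leave more than one reference to the same struct file, so ->flush can run several times and the count it sees does not reliably mark the last close, while ->release runs exactly once when the final reference is dropped. The short userspace sketch below is illustrative only and not part of this patch; the path /tmp/xfs_close_demo is arbitrary.

/*
 * Illustrative userspace sketch, not part of the patch.  Two descriptors
 * obtained via dup(2) refer to the same struct file: each close(2) reaches
 * the filesystem's ->flush hook (when one is defined), but ->release runs
 * only after the second close drops the last reference.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd, fd2;

	fd = open("/tmp/xfs_close_demo", O_CREAT | O_WRONLY, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	fd2 = dup(fd);		/* second descriptor, same struct file */
	if (fd2 < 0) {
		perror("dup");
		return 1;
	}

	close(fd);		/* ->flush runs; file_count() is still > 1 */
	close(fd2);		/* ->flush runs again, then ->release */
	printf("both descriptors closed; underlying file released once\n");
	return 0;
}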


@@ -183,15 +183,6 @@ xfs_file_open(
 	return -bhv_vop_open(vn_from_inode(inode), NULL);
 }
 
-STATIC int
-xfs_file_close(
-	struct file	*filp,
-	fl_owner_t	id)
-{
-	return -bhv_vop_close(vn_from_inode(filp->f_path.dentry->d_inode), 0,
-				file_count(filp) > 1 ? L_FALSE : L_TRUE, NULL);
-}
-
 STATIC int
 xfs_file_release(
 	struct inode	*inode,
@@ -436,7 +427,6 @@ const struct file_operations xfs_file_operations = {
 #endif
 	.mmap		= xfs_file_mmap,
 	.open		= xfs_file_open,
-	.flush		= xfs_file_close,
 	.release	= xfs_file_release,
 	.fsync		= xfs_file_fsync,
 #ifdef HAVE_FOP_OPEN_EXEC
@@ -458,7 +448,6 @@ const struct file_operations xfs_invis_file_operations = {
 #endif
 	.mmap		= xfs_file_mmap,
 	.open		= xfs_file_open,
-	.flush		= xfs_file_close,
 	.release	= xfs_file_release,
 	.fsync		= xfs_file_fsync,
 };


@@ -129,10 +129,7 @@ typedef enum bhv_vchange {
 	VCHANGE_FLAGS_IOEXCL_COUNT	= 4
 } bhv_vchange_t;
 
-typedef enum { L_FALSE, L_TRUE } lastclose_t;
-
 typedef int	(*vop_open_t)(bhv_desc_t *, struct cred *);
-typedef int	(*vop_close_t)(bhv_desc_t *, int, lastclose_t, struct cred *);
 typedef ssize_t (*vop_read_t)(bhv_desc_t *, struct kiocb *,
 				const struct iovec *, unsigned int,
 				loff_t *, int, struct cred *);
@@ -200,7 +197,6 @@ typedef int	(*vop_iflush_t)(bhv_desc_t *, int);
 typedef struct bhv_vnodeops {
 	bhv_position_t	vn_position;	/* position within behavior chain */
 	vop_open_t		vop_open;
-	vop_close_t		vop_close;
 	vop_read_t		vop_read;
 	vop_write_t		vop_write;
 	vop_splice_read_t	vop_splice_read;
@@ -245,7 +241,6 @@ typedef struct bhv_vnodeops {
 #define VNHEAD(vp)	((vp)->v_bh.bh_first)
 #define VOP(op, vp)	(*((bhv_vnodeops_t *)VNHEAD(vp)->bd_ops)->op)
 #define bhv_vop_open(vp, cr)		VOP(vop_open, vp)(VNHEAD(vp),cr)
-#define bhv_vop_close(vp, f,last,cr)	VOP(vop_close, vp)(VNHEAD(vp),f,last,cr)
 #define bhv_vop_read(vp,file,iov,segs,offset,ioflags,cr)	\
 		VOP(vop_read, vp)(VNHEAD(vp),file,iov,segs,offset,ioflags,cr)
 #define bhv_vop_write(vp,file,iov,segs,offset,ioflags,cr)	\


@@ -77,36 +77,6 @@ xfs_open(
 	return 0;
 }
 
-STATIC int
-xfs_close(
-	bhv_desc_t	*bdp,
-	int		flags,
-	lastclose_t	lastclose,
-	cred_t		*credp)
-{
-	bhv_vnode_t	*vp = BHV_TO_VNODE(bdp);
-	xfs_inode_t	*ip = XFS_BHVTOI(bdp);
-
-	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
-		return XFS_ERROR(EIO);
-
-	if (lastclose != L_TRUE || !VN_ISREG(vp))
-		return 0;
-
-	/*
-	 * If we previously truncated this file and removed old data in
-	 * the process, we want to initiate "early" writeout on the last
-	 * close.  This is an attempt to combat the notorious NULL files
-	 * problem which is particularly noticable from a truncate down,
-	 * buffered (re-)write (delalloc), followed by a crash.  What we
-	 * are effectively doing here is significantly reducing the time
-	 * window where we'd otherwise be exposed to that problem.
-	 */
-	if (VUNTRUNCATE(vp) && VN_DIRTY(vp) && ip->i_delayed_blks > 0)
-		return bhv_vop_flush_pages(vp, 0, -1, XFS_B_ASYNC, FI_NONE);
-	return 0;
-}
-
 /*
  * xfs_getattr
  */
@@ -1566,6 +1536,22 @@ xfs_release(
 	if (vp->v_vfsp->vfs_flag & VFS_RDONLY)
 		return 0;
 
+	if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+		/*
+		 * If we previously truncated this file and removed old data
+		 * in the process, we want to initiate "early" writeout on
+		 * the last close.  This is an attempt to combat the notorious
+		 * NULL files problem which is particularly noticable from a
+		 * truncate down, buffered (re-)write (delalloc), followed by
+		 * a crash.  What we are effectively doing here is
+		 * significantly reducing the time window where we'd otherwise
+		 * be exposed to that problem.
+		 */
+		if (VUNTRUNCATE(vp) && VN_DIRTY(vp) && ip->i_delayed_blks > 0)
+			bhv_vop_flush_pages(vp, 0, -1, XFS_B_ASYNC, FI_NONE);
+	}
+
 #ifdef HAVE_REFCACHE
 	/* If we are in the NFS reference cache then don't do this now */
 	if (ip->i_refcache)
@@ -4681,7 +4667,6 @@ xfs_change_file_space(
 bhv_vnodeops_t xfs_vnodeops = {
 	BHV_IDENTITY_INIT(VN_BHV_XFS,VNODE_POSITION_XFS),
 	.vop_open		= xfs_open,
-	.vop_close		= xfs_close,
 	.vop_read		= xfs_read,
 #ifdef HAVE_SPLICE
 	.vop_splice_read	= xfs_splice_read,