File locking related changes for v3.20 (pile #1)
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJU1MYmAAoJEAAOaEEZVoIV/rAQAKoHj/PCOATTy05lF/NDhJlS
6NbNjupnC8HrbNPv6Z/cQ902eC1YRVH96gf6we4FeAm9Tjctpje6uEqvPQCUxpot
2jWgCG+g95OeEaQEjXQvR3x5ZfXvPUtwKVOnMF423L1p5Xfbj3kJfGi+dv2k8XOi
GArsUB7uCwqLyyz+L47RJ2Cz7s47M9O25HkVRfWlgYOv+4afq5OpADGKQAhMLL/s
CPhYgqw/7r1p+pLkjUE/x+5BAliDzUinFtDatgD4CeHOdq0RKlxzQ1rFg6uJVg/k
3ZttGOxWUtGIeGM4v5cosDFReLPCESax/TUzn58jxxFR702MjHAA+lHRgjZoWvW/
9EnShl0XlznQX1ns6f0rI1seWe4M5R3CWus8AcG0kDmdbTp8nARo+pBLFhCME/kZ
15GHLz4tDSRt5SNow6aqJdlYJR7p3WrsceKyM5aH9M7odM3eaB5vJxIJ0fljsZbS
Qtz4t+Ua1oVSYD7TX3y7EUiQVPVo8VKS3o6Ua73wCHIXNbSH7hZLOvPLFs6V1Psi
RKqRiad5iO3+iavVGuDDcs12zXZ5hmksE8oMh0NkjFZ6wJlO4Hf5iOt5thABNDmT
Km+40IBq1DYwclPTofaRpB+ytDOnWedMxdWfWdEWQ710zuuNY3cfi/XMXEX34kBY
fLhUMabqcyfUegpA6S0R
=6+UV
-----END PGP SIGNATURE-----

Merge tag 'locks-v3.20-1' of git://git.samba.org/jlayton/linux

Pull file locking related changes #1 from Jeff Layton:
 "This patchset contains a fairly major overhaul of how file locks are
  tracked within the inode. Rather than a single list, we now create a
  per-inode "lock context" that contains individual lists for the file
  locks, and a new dedicated spinlock for them.

  There are changes in other trees that are based on top of this set so
  it may be easiest to pull this in early"

* tag 'locks-v3.20-1' of git://git.samba.org/jlayton/linux:
  locks: update comments that refer to inode->i_flock
  locks: consolidate NULL i_flctx checks in locks_remove_file
  locks: keep a count of locks on the flctx lists
  locks: clean up the lm_change prototype
  locks: add a dedicated spinlock to protect i_flctx lists
  locks: remove i_flock field from struct inode
  locks: convert lease handling to file_lock_context
  locks: convert posix locks to file_lock_context
  locks: move flock locks to file_lock_context
  ceph: move spinlocking into ceph_encode_locks_to_buffer and ceph_count_locks
  locks: add a new struct file_locking_context pointer to struct inode
  locks: have locks_release_file use flock_lock_file to release generic flock locks
  locks: add new struct list_head to struct file_lock
This commit is contained in: commit 4b4f8580a4
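For orientation before the per-filesystem hunks below: the series replaces the single inode->i_flock list with a lazily allocated per-inode struct file_lock_context (its definition, added to include/linux/fs.h, appears near the end of this diff), and callers switch from walking fl_next under i_lock to walking the flc_posix/flc_flock/flc_lease lists under the new flc_lock. A minimal sketch of the new idiom follows; all field and function names are taken from the patches, but the helper itself is illustrative and not part of the series.

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Illustrative helper (not in the series): count a file's POSIX locks. */
static int count_posix_locks(struct inode *inode)
{
        struct file_lock_context *flctx = inode->i_flctx;
        struct file_lock *fl;
        int count = 0;

        /* i_flctx is allocated on first use; NULL means no locks were ever set */
        if (!flctx || list_empty_careful(&flctx->flc_posix))
                return 0;

        spin_lock(&flctx->flc_lock);
        list_for_each_entry(fl, &flctx->flc_posix, fl_list)
                count++;
        spin_unlock(&flctx->flc_lock);

        return count;
}

The same shape recurs in the lockd, NFS and NFSD hunks below: test i_flctx and list_empty_careful() locklessly, and only take flc_lock when a list actually has entries.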
@@ -239,23 +239,21 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
         return err;
 }
 
-/**
- * Must be called with lock_flocks() already held. Fills in the passed
- * counter variables, so you can prepare pagelist metadata before calling
- * ceph_encode_locks.
+/*
+ * Fills in the passed counter variables, so you can prepare pagelist metadata
+ * before calling ceph_encode_locks.
  */
 void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
 {
-        struct file_lock *lock;
+        struct file_lock_context *ctx;
 
         *fcntl_count = 0;
         *flock_count = 0;
 
-        for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
-                if (lock->fl_flags & FL_POSIX)
-                        ++(*fcntl_count);
-                else if (lock->fl_flags & FL_FLOCK)
-                        ++(*flock_count);
+        ctx = inode->i_flctx;
+        if (ctx) {
+                *fcntl_count = ctx->flc_posix_cnt;
+                *flock_count = ctx->flc_flock_cnt;
         }
         dout("counted %d flock locks and %d fcntl locks",
              *flock_count, *fcntl_count);
@@ -271,6 +269,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
                                 int num_fcntl_locks, int num_flock_locks)
 {
         struct file_lock *lock;
+        struct file_lock_context *ctx = inode->i_flctx;
         int err = 0;
         int seen_fcntl = 0;
         int seen_flock = 0;
@@ -279,33 +278,34 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
         dout("encoding %d flock and %d fcntl locks", num_flock_locks,
              num_fcntl_locks);
 
-        for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
-                if (lock->fl_flags & FL_POSIX) {
-                        ++seen_fcntl;
-                        if (seen_fcntl > num_fcntl_locks) {
-                                err = -ENOSPC;
-                                goto fail;
-                        }
-                        err = lock_to_ceph_filelock(lock, &flocks[l]);
-                        if (err)
-                                goto fail;
-                        ++l;
+        if (!ctx)
+                return 0;
+
+        spin_lock(&ctx->flc_lock);
+        list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
+                ++seen_fcntl;
+                if (seen_fcntl > num_fcntl_locks) {
+                        err = -ENOSPC;
+                        goto fail;
                 }
+                err = lock_to_ceph_filelock(lock, &flocks[l]);
+                if (err)
+                        goto fail;
+                ++l;
         }
-        for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
-                if (lock->fl_flags & FL_FLOCK) {
-                        ++seen_flock;
-                        if (seen_flock > num_flock_locks) {
-                                err = -ENOSPC;
-                                goto fail;
-                        }
-                        err = lock_to_ceph_filelock(lock, &flocks[l]);
-                        if (err)
-                                goto fail;
-                        ++l;
+        list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
+                ++seen_flock;
+                if (seen_flock > num_flock_locks) {
+                        err = -ENOSPC;
+                        goto fail;
                 }
+                err = lock_to_ceph_filelock(lock, &flocks[l]);
+                if (err)
+                        goto fail;
+                ++l;
         }
 fail:
+        spin_unlock(&ctx->flc_lock);
         return err;
 }
 
@@ -2700,20 +2700,16 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
         struct ceph_filelock *flocks;
 
 encode_again:
-        spin_lock(&inode->i_lock);
         ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
-        spin_unlock(&inode->i_lock);
         flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
                          sizeof(struct ceph_filelock), GFP_NOFS);
         if (!flocks) {
                 err = -ENOMEM;
                 goto out_free;
         }
-        spin_lock(&inode->i_lock);
         err = ceph_encode_locks_to_buffer(inode, flocks,
                                           num_fcntl_locks,
                                           num_flock_locks);
-        spin_unlock(&inode->i_lock);
         if (err) {
                 kfree(flocks);
                 if (err == -ENOSPC)
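With the spin locking moved into ceph_count_locks() and ceph_encode_locks_to_buffer(), the caller above no longer takes inode->i_lock around them. Roughly, the resulting flow is the count/allocate/encode/retry loop sketched here; this is a simplification of the encode_caps_cb() hunk, and the retry path is an assumption since the hunk is truncated right after the -ENOSPC check.

        int num_fcntl_locks, num_flock_locks, err;
        struct ceph_filelock *flocks;

encode_again:
        /* reads the cached counters from i_flctx; no locking in the caller */
        ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
        flocks = kmalloc((num_fcntl_locks + num_flock_locks) *
                         sizeof(struct ceph_filelock), GFP_NOFS);
        if (!flocks)
                return -ENOMEM;
        /* takes and drops flctx->flc_lock internally while walking the lists */
        err = ceph_encode_locks_to_buffer(inode, flocks,
                                          num_fcntl_locks, num_flock_locks);
        if (err == -ENOSPC) {
                /* locks were added between counting and encoding; presumably retry */
                kfree(flocks);
                goto encode_again;
        }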
@@ -1113,11 +1113,6 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
         return rc;
 }
 
-/* copied from fs/locks.c with a name change */
-#define cifs_for_each_lock(inode, lockp) \
-        for (lockp = &inode->i_flock; *lockp != NULL; \
-             lockp = &(*lockp)->fl_next)
-
 struct lock_to_push {
         struct list_head llist;
         __u64 offset;
@@ -1132,8 +1127,9 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
 {
         struct inode *inode = cfile->dentry->d_inode;
         struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
-        struct file_lock *flock, **before;
-        unsigned int count = 0, i = 0;
+        struct file_lock *flock;
+        struct file_lock_context *flctx = inode->i_flctx;
+        unsigned int i;
         int rc = 0, xid, type;
         struct list_head locks_to_send, *el;
         struct lock_to_push *lck, *tmp;
@@ -1141,21 +1137,17 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
 
         xid = get_xid();
 
-        spin_lock(&inode->i_lock);
-        cifs_for_each_lock(inode, before) {
-                if ((*before)->fl_flags & FL_POSIX)
-                        count++;
-        }
-        spin_unlock(&inode->i_lock);
+        if (!flctx)
+                goto out;
 
         INIT_LIST_HEAD(&locks_to_send);
 
         /*
-         * Allocating count locks is enough because no FL_POSIX locks can be
-         * added to the list while we are holding cinode->lock_sem that
+         * Allocating flc_posix_cnt locks is enough because no FL_POSIX locks
+         * can be added to the list while we are holding cinode->lock_sem that
          * protects locking operations of this inode.
         */
-        for (; i < count; i++) {
+        for (i = 0; i < flctx->flc_posix_cnt; i++) {
                 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
                 if (!lck) {
                         rc = -ENOMEM;
@@ -1165,11 +1157,8 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
         }
 
         el = locks_to_send.next;
-        spin_lock(&inode->i_lock);
-        cifs_for_each_lock(inode, before) {
-                flock = *before;
-                if ((flock->fl_flags & FL_POSIX) == 0)
-                        continue;
+        spin_lock(&flctx->flc_lock);
+        list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
                 if (el == &locks_to_send) {
                         /*
                          * The list ended. We don't have enough allocated
@@ -1189,9 +1178,8 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
                 lck->length = length;
                 lck->type = type;
                 lck->offset = flock->fl_start;
-                el = el->next;
         }
-        spin_unlock(&inode->i_lock);
+        spin_unlock(&flctx->flc_lock);
 
         list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
                 int stored_rc;
 
@@ -194,7 +194,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
 #ifdef CONFIG_FSNOTIFY
         inode->i_fsnotify_mask = 0;
 #endif
-
+        inode->i_flctx = NULL;
         this_cpu_inc(nr_inodes);
 
         return 0;
@@ -237,6 +237,7 @@ void __destroy_inode(struct inode *inode)
         BUG_ON(inode_has_buffers(inode));
         security_inode_free(inode);
         fsnotify_inode_delete(inode);
+        locks_free_lock_context(inode->i_flctx);
         if (!inode->i_nlink) {
                 WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
                 atomic_long_dec(&inode->i_sb->s_remove_count);
 
@@ -164,12 +164,15 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file,
 {
         struct inode *inode = nlmsvc_file_inode(file);
         struct file_lock *fl;
+        struct file_lock_context *flctx = inode->i_flctx;
         struct nlm_host *lockhost;
 
+        if (!flctx || list_empty_careful(&flctx->flc_posix))
+                return 0;
 again:
         file->f_locks = 0;
-        spin_lock(&inode->i_lock);
-        for (fl = inode->i_flock; fl; fl = fl->fl_next) {
+        spin_lock(&flctx->flc_lock);
+        list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
                 if (fl->fl_lmops != &nlmsvc_lock_operations)
                         continue;
 
@@ -180,7 +183,7 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file,
                 if (match(lockhost, host)) {
                         struct file_lock lock = *fl;
 
-                        spin_unlock(&inode->i_lock);
+                        spin_unlock(&flctx->flc_lock);
                         lock.fl_type = F_UNLCK;
                         lock.fl_start = 0;
                         lock.fl_end = OFFSET_MAX;
@@ -192,7 +195,7 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file,
                         goto again;
                 }
         }
-        spin_unlock(&inode->i_lock);
+        spin_unlock(&flctx->flc_lock);
 
         return 0;
 }
@@ -223,18 +226,21 @@ nlm_file_inuse(struct nlm_file *file)
 {
         struct inode *inode = nlmsvc_file_inode(file);
         struct file_lock *fl;
+        struct file_lock_context *flctx = inode->i_flctx;
 
         if (file->f_count || !list_empty(&file->f_blocks) || file->f_shares)
                 return 1;
 
-        spin_lock(&inode->i_lock);
-        for (fl = inode->i_flock; fl; fl = fl->fl_next) {
-                if (fl->fl_lmops == &nlmsvc_lock_operations) {
-                        spin_unlock(&inode->i_lock);
-                        return 1;
+        if (flctx && !list_empty_careful(&flctx->flc_posix)) {
+                spin_lock(&flctx->flc_lock);
+                list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
+                        if (fl->fl_lmops == &nlmsvc_lock_operations) {
+                                spin_unlock(&flctx->flc_lock);
+                                return 1;
+                        }
                 }
+                spin_unlock(&flctx->flc_lock);
         }
-        spin_unlock(&inode->i_lock);
         file->f_locks = 0;
         return 0;
 }
 
fs/locks.c (569 lines changed): file diff suppressed because it is too large.
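The fs/locks.c changes themselves are suppressed above, but the API surface they add is visible in the include/linux/fs.h hunks further down (struct file_lock_context, locks_free_lock_context(), the flc_* lists and counters) and in __destroy_inode(), which now frees the context. Purely to illustrate the lifecycle those hunks imply — i_flctx starts out NULL in inode_init_always() and is attached on first use — here is a hedged sketch of a lazy allocator; the function name, the flctx_cache slab and the cmpxchg() install are assumptions for illustration, not the actual fs/locks.c code.

static struct kmem_cache *flctx_cache;          /* assumed slab for contexts */

static struct file_lock_context *
get_lock_context(struct inode *inode)           /* illustrative name only */
{
        struct file_lock_context *ctx;

        /* fast path: some task already attached a context to this inode */
        ctx = inode->i_flctx;
        if (ctx)
                return ctx;

        ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
        if (!ctx)
                return NULL;

        spin_lock_init(&ctx->flc_lock);
        INIT_LIST_HEAD(&ctx->flc_flock);
        INIT_LIST_HEAD(&ctx->flc_posix);
        INIT_LIST_HEAD(&ctx->flc_lease);
        ctx->flc_flock_cnt = ctx->flc_posix_cnt = ctx->flc_lease_cnt = 0;

        /* only one task gets to install its context; the loser frees its copy */
        if (cmpxchg(&inode->i_flctx, NULL, ctx) != NULL) {
                kmem_cache_free(flctx_cache, ctx);
                ctx = inode->i_flctx;
        }
        return ctx;
}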
@@ -85,25 +85,30 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_
 {
         struct inode *inode = state->inode;
         struct file_lock *fl;
+        struct file_lock_context *flctx = inode->i_flctx;
+        struct list_head *list;
         int status = 0;
 
-        if (inode->i_flock == NULL)
+        if (flctx == NULL)
                 goto out;
 
-        /* Protect inode->i_flock using the i_lock */
-        spin_lock(&inode->i_lock);
-        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
-                if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
-                        continue;
+        list = &flctx->flc_posix;
+        spin_lock(&flctx->flc_lock);
+restart:
+        list_for_each_entry(fl, list, fl_list) {
                 if (nfs_file_open_context(fl->fl_file) != ctx)
                         continue;
-                spin_unlock(&inode->i_lock);
+                spin_unlock(&flctx->flc_lock);
                 status = nfs4_lock_delegation_recall(fl, state, stateid);
                 if (status < 0)
                         goto out;
-                spin_lock(&inode->i_lock);
+                spin_lock(&flctx->flc_lock);
         }
-        spin_unlock(&inode->i_lock);
+        if (list == &flctx->flc_posix) {
+                list = &flctx->flc_flock;
+                goto restart;
+        }
+        spin_unlock(&flctx->flc_lock);
 out:
         return status;
 }
 
@@ -1366,49 +1366,55 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
         struct nfs_inode *nfsi = NFS_I(inode);
         struct file_lock *fl;
         int status = 0;
+        struct file_lock_context *flctx = inode->i_flctx;
+        struct list_head *list;
 
-        if (inode->i_flock == NULL)
+        if (flctx == NULL)
                 return 0;
 
+        list = &flctx->flc_posix;
+
         /* Guard against delegation returns and new lock/unlock calls */
         down_write(&nfsi->rwsem);
-        /* Protect inode->i_flock using the BKL */
-        spin_lock(&inode->i_lock);
-        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
-                if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
-                        continue;
+        spin_lock(&flctx->flc_lock);
+restart:
+        list_for_each_entry(fl, list, fl_list) {
                 if (nfs_file_open_context(fl->fl_file)->state != state)
                         continue;
-                spin_unlock(&inode->i_lock);
+                spin_unlock(&flctx->flc_lock);
                 status = ops->recover_lock(state, fl);
                 switch (status) {
-                case 0:
-                        break;
-                case -ESTALE:
-                case -NFS4ERR_ADMIN_REVOKED:
-                case -NFS4ERR_STALE_STATEID:
-                case -NFS4ERR_BAD_STATEID:
-                case -NFS4ERR_EXPIRED:
-                case -NFS4ERR_NO_GRACE:
-                case -NFS4ERR_STALE_CLIENTID:
-                case -NFS4ERR_BADSESSION:
-                case -NFS4ERR_BADSLOT:
-                case -NFS4ERR_BAD_HIGH_SLOT:
-                case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
-                        goto out;
-                default:
-                        printk(KERN_ERR "NFS: %s: unhandled error %d\n",
-                                        __func__, status);
-                case -ENOMEM:
-                case -NFS4ERR_DENIED:
-                case -NFS4ERR_RECLAIM_BAD:
-                case -NFS4ERR_RECLAIM_CONFLICT:
-                        /* kill_proc(fl->fl_pid, SIGLOST, 1); */
-                        status = 0;
+                case 0:
+                        break;
+                case -ESTALE:
+                case -NFS4ERR_ADMIN_REVOKED:
+                case -NFS4ERR_STALE_STATEID:
+                case -NFS4ERR_BAD_STATEID:
+                case -NFS4ERR_EXPIRED:
+                case -NFS4ERR_NO_GRACE:
+                case -NFS4ERR_STALE_CLIENTID:
+                case -NFS4ERR_BADSESSION:
+                case -NFS4ERR_BADSLOT:
+                case -NFS4ERR_BAD_HIGH_SLOT:
+                case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+                        goto out;
+                default:
+                        pr_err("NFS: %s: unhandled error %d\n",
+                                        __func__, status);
+                case -ENOMEM:
+                case -NFS4ERR_DENIED:
+                case -NFS4ERR_RECLAIM_BAD:
+                case -NFS4ERR_RECLAIM_CONFLICT:
+                        /* kill_proc(fl->fl_pid, SIGLOST, 1); */
+                        status = 0;
                 }
-                spin_lock(&inode->i_lock);
+                spin_lock(&flctx->flc_lock);
         }
-        spin_unlock(&inode->i_lock);
+        if (list == &flctx->flc_posix) {
+                list = &flctx->flc_flock;
+                goto restart;
+        }
+        spin_unlock(&flctx->flc_lock);
 out:
         up_write(&nfsi->rwsem);
         return status;
 
@@ -826,11 +826,15 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
                                       struct nfs_pageio_descriptor *pgio)
 {
         size_t size;
+        struct file_lock_context *flctx;
 
         if (prev) {
                 if (!nfs_match_open_context(req->wb_context, prev->wb_context))
                         return false;
-                if (req->wb_context->dentry->d_inode->i_flock != NULL &&
+                flctx = req->wb_context->dentry->d_inode->i_flctx;
+                if (flctx != NULL &&
+                    !(list_empty_careful(&flctx->flc_posix) &&
+                      list_empty_careful(&flctx->flc_flock)) &&
                     !nfs_match_lock_context(req->wb_lock_context,
                                             prev->wb_lock_context))
                         return false;
 
@@ -1091,6 +1091,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
 {
         struct nfs_open_context *ctx = nfs_file_open_context(file);
         struct nfs_lock_context *l_ctx;
+        struct file_lock_context *flctx = file_inode(file)->i_flctx;
         struct nfs_page *req;
         int do_flush, status;
         /*
@@ -1109,7 +1110,9 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
                 do_flush = req->wb_page != page || req->wb_context != ctx;
                 /* for now, flush if more than 1 request in page_group */
                 do_flush |= req->wb_this_page != req;
-                if (l_ctx && ctx->dentry->d_inode->i_flock != NULL) {
+                if (l_ctx && flctx &&
+                    !(list_empty_careful(&flctx->flc_posix) &&
+                      list_empty_careful(&flctx->flc_flock))) {
                         do_flush |= l_ctx->lockowner.l_owner != current->files
                                 || l_ctx->lockowner.l_pid != current->tgid;
                 }
@@ -1170,6 +1173,13 @@ static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
         return PageUptodate(page) != 0;
 }
 
+static bool
+is_whole_file_wrlock(struct file_lock *fl)
+{
+        return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
+                        fl->fl_type == F_WRLCK;
+}
+
 /* If we know the page is up to date, and we're not using byte range locks (or
  * if we have the whole file locked for writing), it may be more efficient to
  * extend the write to cover the entire page in order to avoid fragmentation
@@ -1180,17 +1190,36 @@ static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
  */
 static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
 {
+        int ret;
+        struct file_lock_context *flctx = inode->i_flctx;
+        struct file_lock *fl;
+
         if (file->f_flags & O_DSYNC)
                 return 0;
         if (!nfs_write_pageuptodate(page, inode))
                 return 0;
         if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
                 return 1;
-        if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 &&
-                        inode->i_flock->fl_end == OFFSET_MAX &&
-                        inode->i_flock->fl_type != F_RDLCK))
-                return 1;
-        return 0;
+        if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
+                       list_empty_careful(&flctx->flc_posix)))
+                return 0;
+
+        /* Check to see if there are whole file write locks */
+        ret = 0;
+        spin_lock(&flctx->flc_lock);
+        if (!list_empty(&flctx->flc_posix)) {
+                fl = list_first_entry(&flctx->flc_posix, struct file_lock,
+                                      fl_list);
+                if (is_whole_file_wrlock(fl))
+                        ret = 1;
+        } else if (!list_empty(&flctx->flc_flock)) {
+                fl = list_first_entry(&flctx->flc_flock, struct file_lock,
+                                      fl_list);
+                if (fl->fl_type == F_WRLCK)
+                        ret = 1;
+        }
+        spin_unlock(&flctx->flc_lock);
+        return ret;
 }
 
 /*
 
@@ -3477,7 +3477,8 @@ nfsd_break_deleg_cb(struct file_lock *fl)
 }
 
 static int
-nfsd_change_deleg_cb(struct file_lock **onlist, int arg, struct list_head *dispose)
+nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
+                     struct list_head *dispose)
 {
         if (arg & F_UNLCK)
                 return lease_modify(onlist, arg, dispose);
@@ -5556,10 +5557,11 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 static bool
 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
 {
-        struct file_lock **flpp;
+        struct file_lock *fl;
         int status = false;
         struct file *filp = find_any_file(fp);
         struct inode *inode;
+        struct file_lock_context *flctx;
 
         if (!filp) {
                 /* Any valid lock stateid should have some sort of access */
@@ -5568,15 +5570,18 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
         }
 
         inode = file_inode(filp);
+        flctx = inode->i_flctx;
 
-        spin_lock(&inode->i_lock);
-        for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
-                if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
-                        status = true;
-                        break;
+        if (flctx && !list_empty_careful(&flctx->flc_posix)) {
+                spin_lock(&flctx->flc_lock);
+                list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
+                        if (fl->fl_owner == (fl_owner_t)lowner) {
+                                status = true;
+                                break;
+                        }
                 }
+                spin_unlock(&flctx->flc_lock);
         }
-        spin_unlock(&inode->i_lock);
         fput(filp);
         return status;
 }
 
@@ -358,7 +358,7 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t
                         return retval;
         }
 
-        if (unlikely(inode->i_flock && mandatory_lock(inode))) {
+        if (unlikely(inode->i_flctx && mandatory_lock(inode))) {
                 retval = locks_mandatory_area(
                         read_write == READ ? FLOCK_VERIFY_READ : FLOCK_VERIFY_WRITE,
                         inode, file, pos, count);
 
@@ -625,7 +625,7 @@ struct inode {
         atomic_t                i_readcount;    /* struct files open RO */
 #endif
         const struct file_operations    *i_fop; /* former ->i_op->default_file_ops */
-        struct file_lock        *i_flock;
+        struct file_lock_context        *i_flctx;
         struct address_space    i_data;
         struct list_head        i_devices;
         union {
@@ -885,6 +885,8 @@ static inline struct file *get_file(struct file *f)
 /* legacy typedef, should eventually be removed */
 typedef void *fl_owner_t;
 
+struct file_lock;
+
 struct file_lock_operations {
         void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
         void (*fl_release_private)(struct file_lock *);
@@ -898,7 +900,7 @@ struct lock_manager_operations {
         void (*lm_notify)(struct file_lock *);  /* unblock callback */
         int (*lm_grant)(struct file_lock *, int);
         bool (*lm_break)(struct file_lock *);
-        int (*lm_change)(struct file_lock **, int, struct list_head *);
+        int (*lm_change)(struct file_lock *, int, struct list_head *);
         void (*lm_setup)(struct file_lock *, void **);
 };
 
@@ -923,17 +925,17 @@ int locks_in_grace(struct net *);
  * FIXME: should we create a separate "struct lock_request" to help distinguish
  * these two uses?
  *
- * The i_flock list is ordered by:
+ * The varous i_flctx lists are ordered by:
  *
- * 1) lock type -- FL_LEASEs first, then FL_FLOCK, and finally FL_POSIX
- * 2) lock owner
- * 3) lock range start
- * 4) lock range end
+ * 1) lock owner
+ * 2) lock range start
+ * 3) lock range end
 *
 * Obviously, the last two criteria only matter for POSIX locks.
 */
 struct file_lock {
         struct file_lock *fl_next;      /* singly linked list for this inode */
+        struct list_head fl_list;       /* link into file_lock_context */
         struct hlist_node fl_link;      /* node in global lists */
         struct list_head fl_block;      /* circular list of blocked processes */
         fl_owner_t fl_owner;
@@ -964,6 +966,16 @@ struct file_lock {
         } fl_u;
 };
 
+struct file_lock_context {
+        spinlock_t              flc_lock;
+        struct list_head        flc_flock;
+        struct list_head        flc_posix;
+        struct list_head        flc_lease;
+        int                     flc_flock_cnt;
+        int                     flc_posix_cnt;
+        int                     flc_lease_cnt;
+};
+
 /* The following constant reflects the upper bound of the file/locking space */
 #ifndef OFFSET_MAX
 #define INT_LIMIT(x)    (~((x)1 << (sizeof(x)*8 - 1)))
@@ -990,6 +1002,7 @@ extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
 extern int fcntl_getlease(struct file *filp);
 
 /* fs/locks.c */
+void locks_free_lock_context(struct file_lock_context *ctx);
 void locks_free_lock(struct file_lock *fl);
 extern void locks_init_lock(struct file_lock *);
 extern struct file_lock * locks_alloc_lock(void);
@@ -1010,7 +1023,7 @@ extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int t
 extern void lease_get_mtime(struct inode *, struct timespec *time);
 extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
 extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
-extern int lease_modify(struct file_lock **, int, struct list_head *);
+extern int lease_modify(struct file_lock *, int, struct list_head *);
 #else /* !CONFIG_FILE_LOCKING */
 static inline int fcntl_getlk(struct file *file, unsigned int cmd,
                               struct flock __user *user)
@@ -1047,6 +1060,11 @@ static inline int fcntl_getlease(struct file *filp)
         return F_UNLCK;
 }
 
+static inline void
+locks_free_lock_context(struct file_lock_context *ctx)
+{
+}
+
 static inline void locks_init_lock(struct file_lock *fl)
 {
         return;
@@ -1137,7 +1155,7 @@ static inline int vfs_setlease(struct file *filp, long arg,
         return -EINVAL;
 }
 
-static inline int lease_modify(struct file_lock **before, int arg,
+static inline int lease_modify(struct file_lock *fl, int arg,
                                struct list_head *dispose)
 {
         return -EINVAL;
@@ -1959,7 +1977,7 @@ static inline int locks_verify_truncate(struct inode *inode,
                                         struct file *filp,
                                         loff_t size)
 {
-        if (inode->i_flock && mandatory_lock(inode))
+        if (inode->i_flctx && mandatory_lock(inode))
                 return locks_mandatory_area(
                         FLOCK_VERIFY_WRITE, inode, filp,
                         size < inode->i_size ? size : inode->i_size,
@@ -1973,11 +1991,12 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
 {
         /*
          * Since this check is lockless, we must ensure that any refcounts
-         * taken are done before checking inode->i_flock. Otherwise, we could
-         * end up racing with tasks trying to set a new lease on this file.
+         * taken are done before checking i_flctx->flc_lease. Otherwise, we
+         * could end up racing with tasks trying to set a new lease on this
+         * file.
          */
         smp_mb();
-        if (inode->i_flock)
+        if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
                 return __break_lease(inode, mode, FL_LEASE);
         return 0;
 }
@@ -1986,11 +2005,12 @@ static inline int break_deleg(struct inode *inode, unsigned int mode)
 {
         /*
          * Since this check is lockless, we must ensure that any refcounts
-         * taken are done before checking inode->i_flock. Otherwise, we could
-         * end up racing with tasks trying to set a new lease on this file.
+         * taken are done before checking i_flctx->flc_lease. Otherwise, we
+         * could end up racing with tasks trying to set a new lease on this
+         * file.
         */
         smp_mb();
-        if (inode->i_flock)
+        if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
                 return __break_lease(inode, mode, FL_DELEG);
         return 0;
 }