commit 911d137ab0
Merge tag 'nfsd-5.5' of git://linux-nfs.org/~bfields/linux

Pull nfsd updates from Bruce Fields:
 "This is a relatively quiet cycle for nfsd, mainly various bugfixes.

  Possibly most interesting is Trond's fixes for some callback races
  that were due to my incomplete understanding of rpc client shutdown.
  Unfortunately at the last minute I've started noticing a new
  intermittent failure to send callbacks. As the logic seems basically
  correct, I'm leaving Trond's patches in for now, and hope to find a
  fix in the next week so I don't have to revert those patches"

* tag 'nfsd-5.5' of git://linux-nfs.org/~bfields/linux: (24 commits)
  nfsd: depend on CRYPTO_MD5 for legacy client tracking
  NFSD fixing possible null pointer derefering in copy offload
  nfsd: check for EBUSY from vfs_rmdir/vfs_unink.
  nfsd: Ensure CLONE persists data and metadata changes to the target file
  SUNRPC: Fix backchannel latency metrics
  nfsd: restore NFSv3 ACL support
  nfsd: v4 support requires CRYPTO_SHA256
  nfsd: Fix cld_net->cn_tfm initialization
  lockd: remove __KERNEL__ ifdefs
  sunrpc: remove __KERNEL__ ifdefs
  race in exportfs_decode_fh()
  nfsd: Drop LIST_HEAD where the variable it declares is never used.
  nfsd: document callback_wq serialization of callback code
  nfsd: mark cb path down on unknown errors
  nfsd: Fix races between nfsd4_cb_release() and nfsd4_shutdown_callback()
  nfsd: minor 4.1 callback cleanup
  SUNRPC: Fix svcauth_gss_proxy_init()
  SUNRPC: Trace gssproxy upcall results
  sunrpc: fix crash when cache_head become valid before update
  nfsd: remove private bin2hex implementation
  ...
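For orientation (an editorial note, not part of the pull request): the callback races addressed here are closed by counting callbacks in flight on each client and making shutdown wait for that count to drain, using the kernel's wake_up_var()/wait_var_event() helpers. A minimal sketch of that pattern, with a hypothetical cb_tracker standing in for struct nfs4_client, might look like this:

/* Sketch only: illustrates the inflight-accounting pattern the series adds. */
#include <linux/atomic.h>
#include <linux/wait_bit.h>

struct cb_tracker {
	atomic_t inflight;		/* outstanding callbacks */
};

static void cb_inflight_begin(struct cb_tracker *t)
{
	atomic_inc(&t->inflight);	/* one more callback queued */
}

static void cb_inflight_end(struct cb_tracker *t)
{
	/* the last completion wakes anyone sleeping in cb_inflight_wait() */
	if (atomic_dec_and_test(&t->inflight))
		wake_up_var(&t->inflight);
}

static void cb_inflight_wait(struct cb_tracker *t)
{
	/* at shutdown: block until every callback that began has ended */
	wait_var_event(&t->inflight, !atomic_read(&t->inflight));
}

In the diff below these correspond to nfsd41_cb_inflight_begin(), nfsd41_cb_inflight_end() and nfsd41_cb_inflight_wait_complete() operating on the new nfs4_client cl_cb_inflight counter.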
@@ -73,7 +73,8 @@ config NFSD_V4
	select NFSD_V3
	select FS_POSIX_ACL
	select SUNRPC_GSS
	select CRYPTO
	select CRYPTO_MD5
	select CRYPTO_SHA256
	select GRACE_PERIOD
	help
	  This option enables support in your system's NFS server for

@@ -685,8 +685,6 @@ nfsd_file_cache_purge(struct net *net)
void
nfsd_file_cache_shutdown(void)
{
	LIST_HEAD(dispose);

	set_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);

	lease_unregister_notifier(&nfsd_file_lease_notifier);
@@ -826,6 +826,31 @@ static int max_cb_time(struct net *net)
	return max(nn->nfsd4_lease/10, (time_t)1) * HZ;
}

static struct workqueue_struct *callback_wq;

static bool nfsd4_queue_cb(struct nfsd4_callback *cb)
{
	return queue_work(callback_wq, &cb->cb_work);
}

static void nfsd41_cb_inflight_begin(struct nfs4_client *clp)
{
	atomic_inc(&clp->cl_cb_inflight);
}

static void nfsd41_cb_inflight_end(struct nfs4_client *clp)
{
	if (atomic_dec_and_test(&clp->cl_cb_inflight))
		wake_up_var(&clp->cl_cb_inflight);
}

static void nfsd41_cb_inflight_wait_complete(struct nfs4_client *clp)
{
	wait_var_event(&clp->cl_cb_inflight,
			!atomic_read(&clp->cl_cb_inflight));
}

static const struct cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses)
{
	if (clp->cl_minorversion == 0) {

@@ -937,14 +962,21 @@ static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
		clp->cl_cb_state = NFSD4_CB_UP;
}

static void nfsd4_cb_probe_release(void *calldata)
{
	struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);

	nfsd41_cb_inflight_end(clp);
}

static const struct rpc_call_ops nfsd4_cb_probe_ops = {
	/* XXX: release method to ensure we set the cb channel down if
	 * necessary on early failure? */
	.rpc_call_done = nfsd4_cb_probe_done,
	.rpc_release = nfsd4_cb_probe_release,
};

static struct workqueue_struct *callback_wq;

/*
 * Poke the callback thread to process any updates to the callback
 * parameters, and send a null probe.

@@ -975,9 +1007,12 @@ void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
 * If the slot is available, then mark it busy. Otherwise, set the
 * thread for sleeping on the callback RPC wait queue.
 */
static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
static bool nfsd41_cb_get_slot(struct nfsd4_callback *cb, struct rpc_task *task)
{
	if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
	struct nfs4_client *clp = cb->cb_clp;

	if (!cb->cb_holds_slot &&
	    test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
		rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
		/* Race breaker */
		if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {

@@ -986,9 +1021,31 @@ static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
		}
		rpc_wake_up_queued_task(&clp->cl_cb_waitq, task);
	}
	cb->cb_holds_slot = true;
	return true;
}

static void nfsd41_cb_release_slot(struct nfsd4_callback *cb)
{
	struct nfs4_client *clp = cb->cb_clp;

	if (cb->cb_holds_slot) {
		cb->cb_holds_slot = false;
		clear_bit(0, &clp->cl_cb_slot_busy);
		rpc_wake_up_next(&clp->cl_cb_waitq);
	}
}

static void nfsd41_destroy_cb(struct nfsd4_callback *cb)
{
	struct nfs4_client *clp = cb->cb_clp;

	nfsd41_cb_release_slot(cb);
	if (cb->cb_ops && cb->cb_ops->release)
		cb->cb_ops->release(cb);
	nfsd41_cb_inflight_end(clp);
}

/*
 * TODO: cb_sequence should support referring call lists, cachethis, multiple
 * slots, and mark callback channel down on communication errors.

@@ -1005,11 +1062,8 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
	 */
	cb->cb_seq_status = 1;
	cb->cb_status = 0;
	if (minorversion) {
		if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task))
			return;
		cb->cb_holds_slot = true;
	}
	if (minorversion && !nfsd41_cb_get_slot(cb, task))
		return;
	rpc_call_start(task);
}

@@ -1072,13 +1126,12 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
		}
		break;
	default:
		nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status);
		dprintk("%s: unprocessed error %d\n", __func__,
			cb->cb_seq_status);
	}

	cb->cb_holds_slot = false;
	clear_bit(0, &clp->cl_cb_slot_busy);
	rpc_wake_up_next(&clp->cl_cb_waitq);
	nfsd41_cb_release_slot(cb);
	dprintk("%s: freed slot, new seqid=%d\n", __func__,
		clp->cl_cb_session->se_cb_seq_nr);

@@ -1091,8 +1144,10 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
	ret = false;
	goto out;
need_restart:
	task->tk_status = 0;
	cb->cb_need_restart = true;
	if (!test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags)) {
		task->tk_status = 0;
		cb->cb_need_restart = true;
	}
	return false;
}

@@ -1134,9 +1189,9 @@ static void nfsd4_cb_release(void *calldata)
	struct nfsd4_callback *cb = calldata;

	if (cb->cb_need_restart)
		nfsd4_run_cb(cb);
		nfsd4_queue_cb(cb);
	else
		cb->cb_ops->release(cb);
		nfsd41_destroy_cb(cb);
}

@@ -1170,6 +1225,7 @@ void nfsd4_shutdown_callback(struct nfs4_client *clp)
	 */
	nfsd4_run_cb(&clp->cl_cb_null);
	flush_workqueue(callback_wq);
	nfsd41_cb_inflight_wait_complete(clp);
}

/* requires cl_lock: */

@@ -1187,6 +1243,12 @@ static struct nfsd4_conn * __nfsd4_find_backchannel(struct nfs4_client *clp)
	return NULL;
}

/*
 * Note there isn't a lot of locking in this code; instead we depend on
 * the fact that it is run from the callback_wq, which won't run two
 * work items at once. So, for example, callback_wq handles all access
 * of cl_cb_client and all calls to rpc_create or rpc_shutdown_client.
 */
static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
{
	struct nfs4_cb_conn conn;

@@ -1255,8 +1317,7 @@ nfsd4_run_cb_work(struct work_struct *work)
	clnt = clp->cl_cb_client;
	if (!clnt) {
		/* Callback channel broken, or client killed; give up: */
		if (cb->cb_ops && cb->cb_ops->release)
			cb->cb_ops->release(cb);
		nfsd41_destroy_cb(cb);
		return;
	}

@@ -1265,6 +1326,7 @@ nfsd4_run_cb_work(struct work_struct *work)
	 */
	if (!cb->cb_ops && clp->cl_minorversion) {
		clp->cl_cb_state = NFSD4_CB_UP;
		nfsd41_destroy_cb(cb);
		return;
	}

@@ -1290,5 +1352,9 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,

void nfsd4_run_cb(struct nfsd4_callback *cb)
{
	queue_work(callback_wq, &cb->cb_work);
	struct nfs4_client *clp = cb->cb_clp;

	nfsd41_cb_inflight_begin(clp);
	if (!nfsd4_queue_cb(cb))
		nfsd41_cb_inflight_end(clp);
}
@@ -1077,7 +1077,8 @@ nfsd4_clone(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		goto out;

	status = nfsd4_clone_file_range(src->nf_file, clone->cl_src_pos,
			dst->nf_file, clone->cl_dst_pos, clone->cl_count);
			dst->nf_file, clone->cl_dst_pos, clone->cl_count,
			EX_ISSYNC(cstate->current_fh.fh_export));

	nfsd_file_put(dst);
	nfsd_file_put(src);

@@ -1297,7 +1298,8 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
out:
	return status;
out_err:
	cleanup_async_copy(async_copy);
	if (async_copy)
		cleanup_async_copy(async_copy);
	goto out;
}
@@ -1578,6 +1578,7 @@ nfsd4_cld_tracking_init(struct net *net)
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	bool running;
	int retries = 10;
	struct crypto_shash *tfm;

	status = nfs4_cld_state_init(net);
	if (status)

@@ -1586,11 +1587,6 @@ nfsd4_cld_tracking_init(struct net *net)
	status = __nfsd4_init_cld_pipe(net);
	if (status)
		goto err_shutdown;
	nn->cld_net->cn_tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(nn->cld_net->cn_tfm)) {
		status = PTR_ERR(nn->cld_net->cn_tfm);
		goto err_remove;
	}

	/*
	 * rpc pipe upcalls take 30 seconds to time out, so we don't want to

@@ -1607,6 +1603,12 @@ nfsd4_cld_tracking_init(struct net *net)
		status = -ETIMEDOUT;
		goto err_remove;
	}
	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm)) {
		status = PTR_ERR(tfm);
		goto err_remove;
	}
	nn->cld_net->cn_tfm = tfm;

	status = nfsd4_cld_get_version(nn);
	if (status == -EOPNOTSUPP)

@@ -1850,19 +1852,14 @@ nfsd4_umh_cltrack_upcall(char *cmd, char *arg, char *env0, char *env1)
static char *
bin_to_hex_dup(const unsigned char *src, int srclen)
{
	int i;
	char *buf, *hex;
	char *buf;

	/* +1 for terminating NULL */
	buf = kmalloc((srclen * 2) + 1, GFP_KERNEL);
	buf = kzalloc((srclen * 2) + 1, GFP_KERNEL);
	if (!buf)
		return buf;

	hex = buf;
	for (i = 0; i < srclen; i++) {
		sprintf(hex, "%2.2x", *src++);
		hex += 2;
	}
	bin2hex(buf, src, srclen);
	return buf;
}
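An aside on the bin_to_hex_dup() hunk above (editorial, not from the patch): the open-coded sprintf() loop is replaced by the kernel's bin2hex() helper from lib/hexdump.c, which writes two lowercase hex digits per input byte and returns a pointer just past the last digit, so termination is the caller's job; switching the allocation to kzalloc() keeps the final byte as the string's NUL. A hypothetical standalone use:

#include <linux/kernel.h>	/* bin2hex() */
#include <linux/slab.h>

/* Hypothetical helper: return a newly allocated hex string for @src. */
static char *hex_string_dup(const unsigned char *src, size_t srclen)
{
	char *buf = kzalloc(srclen * 2 + 1, GFP_KERNEL);	/* +1 keeps a NUL */

	if (buf)
		bin2hex(buf, src, srclen);
	return buf;
}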
@@ -2382,10 +2382,10 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
	access = bmap_to_share_mode(ols->st_access_bmap);
	deny = bmap_to_share_mode(ols->st_deny_bmap);

	seq_printf(s, "access: \%s\%s, ",
	seq_printf(s, "access: %s%s, ",
		access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
		access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
	seq_printf(s, "deny: \%s\%s, ",
	seq_printf(s, "deny: %s%s, ",
		deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
		deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");

@@ -3548,12 +3548,17 @@ static bool replay_matches_cache(struct svc_rqst *rqstp,
	    (bool)seq->cachethis)
		return false;
	/*
	 * If there's an error than the reply can have fewer ops than
	 * the call. But if we cached a reply with *more* ops than the
	 * call you're sending us now, then this new call is clearly not
	 * really a replay of the old one:
	 * If there's an error then the reply can have fewer ops than
	 * the call.
	 */
	if (slot->sl_opcnt < argp->opcnt)
	if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
		return false;
	/*
	 * But if we cached a reply with *more* ops than the call you're
	 * sending us now, then this new call is clearly not really a
	 * replay of the old one:
	 */
	if (slot->sl_opcnt > argp->opcnt)
		return false;
	/* This is the only check explicitly called by spec: */
	if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
@@ -3452,7 +3452,6 @@ static __be32 nfsd4_encode_splice_read(
	struct xdr_stream *xdr = &resp->xdr;
	struct xdr_buf *buf = xdr->buf;
	u32 eof;
	long len;
	int space_left;
	__be32 nfserr;
	__be32 *p = xdr->p - 2;

@@ -3461,7 +3460,6 @@ static __be32 nfsd4_encode_splice_read(
	if (xdr->end - xdr->p < 1)
		return nfserr_resource;

	len = maxcount;
	nfserr = nfsd_splice_read(read->rd_rqstp, read->rd_fhp,
				  file, read->rd_offset, &maxcount, &eof);
	read->rd_length = maxcount;
@@ -280,7 +280,8 @@ void nfsd_lockd_shutdown(void);
#define nfserr_union_notsupp cpu_to_be32(NFS4ERR_UNION_NOTSUPP)
#define nfserr_offload_denied cpu_to_be32(NFS4ERR_OFFLOAD_DENIED)
#define nfserr_wrong_lfs cpu_to_be32(NFS4ERR_WRONG_LFS)
#define nfserr_badlabel cpu_to_be32(NFS4ERR_BADLABEL)
#define nfserr_file_open cpu_to_be32(NFS4ERR_FILE_OPEN)

/* error codes for internal use */
/* if a request fails due to kmalloc failure, it gets dropped.
@@ -95,12 +95,11 @@ static const struct svc_version *nfsd_acl_version[] = {

#define NFSD_ACL_MINVERS 2
#define NFSD_ACL_NRVERS ARRAY_SIZE(nfsd_acl_version)
static const struct svc_version *nfsd_acl_versions[NFSD_ACL_NRVERS];

static struct svc_program nfsd_acl_program = {
	.pg_prog = NFS_ACL_PROGRAM,
	.pg_nvers = NFSD_ACL_NRVERS,
	.pg_vers = nfsd_acl_versions,
	.pg_vers = nfsd_acl_version,
	.pg_name = "nfsacl",
	.pg_class = "nfsd",
	.pg_stats = &nfsd_acl_svcstats,
@@ -367,6 +367,7 @@ struct nfs4_client {
	struct net *net;
	struct list_head async_copies; /* list of async copies */
	spinlock_t async_lock; /* lock for async copies */
	atomic_t cl_cb_inflight; /* Outstanding callbacks */
};

/* struct nfs4_client_reset
@@ -525,7 +525,7 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
#endif

__be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
		u64 dst_pos, u64 count)
		u64 dst_pos, u64 count, bool sync)
{
	loff_t cloned;

@@ -534,6 +534,12 @@ __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
		return nfserrno(cloned);
	if (count && cloned != count)
		return nfserrno(-EINVAL);
	if (sync) {
		loff_t dst_end = count ? dst_pos + count - 1 : LLONG_MAX;
		int status = vfs_fsync_range(dst, dst_pos, dst_end, 0);
		if (status < 0)
			return nfserrno(status);
	}
	return 0;
}

@@ -1809,7 +1815,17 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
out_drop_write:
	fh_drop_write(fhp);
out_nfserr:
	err = nfserrno(host_err);
	if (host_err == -EBUSY) {
		/* name is mounted-on. There is no perfect
		 * error status.
		 */
		if (nfsd_v4client(rqstp))
			err = nfserr_file_open;
		else
			err = nfserr_acces;
	} else {
		err = nfserrno(host_err);
	}
out:
	return err;
}
@@ -56,7 +56,7 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *, struct svc_fh *,
__be32 nfsd4_vfs_fallocate(struct svc_rqst *, struct svc_fh *,
		struct file *, loff_t, loff_t, int);
__be32 nfsd4_clone_file_range(struct file *, u64, struct file *,
		u64, u64);
		u64, u64, bool);
#endif /* CONFIG_NFSD_V4 */
__be32 nfsd_create_locked(struct svc_rqst *, struct svc_fh *,
		char *name, int len, struct iattr *attrs,
@@ -10,8 +10,6 @@
#ifndef LINUX_LOCKD_DEBUG_H
#define LINUX_LOCKD_DEBUG_H

#ifdef __KERNEL__

#include <linux/sunrpc/debug.h>

/*

@@ -25,8 +23,6 @@
# define ifdebug(flag) if (0)
#endif

#endif /* __KERNEL__ */

/*
 * Debug flags
 */

@@ -10,8 +10,6 @@
#ifndef LINUX_LOCKD_LOCKD_H
#define LINUX_LOCKD_LOCKD_H

#ifdef __KERNEL__

#include <linux/in.h>
#include <linux/in6.h>
#include <net/ipv6.h>

@@ -373,6 +371,4 @@ static inline int nlm_compare_locks(const struct file_lock *fl1,

extern const struct lock_manager_operations nlmsvc_lock_operations;

#endif /* __KERNEL__ */

#endif /* LINUX_LOCKD_LOCKD_H */

@@ -10,8 +10,6 @@
#ifndef _LINUX_SUNRPC_AUTH_H
#define _LINUX_SUNRPC_AUTH_H

#ifdef __KERNEL__

#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/xdr.h>

@@ -194,5 +192,4 @@ struct rpc_cred *get_rpccred(struct rpc_cred *cred)
	return NULL;
}

#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_AUTH_H */

@@ -13,7 +13,6 @@
#ifndef _LINUX_SUNRPC_AUTH_GSS_H
#define _LINUX_SUNRPC_AUTH_GSS_H

#ifdef __KERNEL__
#include <linux/refcount.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svc.h>

@@ -90,6 +89,5 @@ struct gss_cred {
	unsigned long gc_upcall_timestamp;
};

#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_AUTH_GSS_H */

@@ -109,8 +109,6 @@ struct rpc_procinfo {
	const char * p_name; /* name of procedure */
};

#ifdef __KERNEL__

struct rpc_create_args {
	struct net *net;
	int protocol;

@@ -238,5 +236,4 @@ static inline int rpc_reply_expected(struct rpc_task *task)
		(task->tk_msg.rpc_proc->p_decode != NULL);
}

#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_CLNT_H */

@@ -13,7 +13,6 @@
#ifndef _LINUX_SUNRPC_GSS_API_H
#define _LINUX_SUNRPC_GSS_API_H

#ifdef __KERNEL__
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/uio.h>

@@ -160,6 +159,5 @@ struct gss_api_mech * gss_mech_get(struct gss_api_mech *);
 * corresponding call to gss_mech_put. */
void gss_mech_put(struct gss_api_mech *);

#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_GSS_API_H */

@@ -34,8 +34,6 @@
#ifndef _LINUX_SUNRPC_GSS_ERR_H
#define _LINUX_SUNRPC_GSS_ERR_H

#ifdef __KERNEL__

typedef unsigned int OM_uint32;

/*

@@ -163,5 +161,4 @@ typedef unsigned int OM_uint32;
/* XXXX This is a necessary evil until the spec is fixed */
#define GSS_S_CRED_UNAVAIL GSS_S_FAILURE

#endif /* __KERNEL__ */
#endif /* __LINUX_SUNRPC_GSS_ERR_H */

@@ -8,8 +8,6 @@
#ifndef _LINUX_SUNRPC_MSGPROT_H_
#define _LINUX_SUNRPC_MSGPROT_H_

#ifdef __KERNEL__ /* user programs should get these from the rpc header files */

#define RPC_VERSION 2

/* size of an XDR encoding unit in bytes, i.e. 32bit */

@@ -217,5 +215,4 @@ typedef __be32 rpc_fraghdr;
/* Assume INET6_ADDRSTRLEN will always be larger than INET_ADDRSTRLEN... */
#define RPCBIND_MAXUADDRLEN RPCBIND_MAXUADDR6LEN

#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_MSGPROT_H_ */

@@ -2,8 +2,6 @@
#ifndef _LINUX_SUNRPC_RPC_PIPE_FS_H
#define _LINUX_SUNRPC_RPC_PIPE_FS_H

#ifdef __KERNEL__

#include <linux/workqueue.h>

struct rpc_pipe_dir_head {

@@ -133,4 +131,3 @@ extern void unregister_rpc_pipefs(void);
extern bool gssd_running(struct net *net);

#endif
#endif

@@ -10,8 +10,6 @@
#ifndef _LINUX_SUNRPC_SVCAUTH_H_
#define _LINUX_SUNRPC_SVCAUTH_H_

#ifdef __KERNEL__

#include <linux/string.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/cache.h>

@@ -185,6 +183,4 @@ static inline unsigned long hash_mem(char const *buf, int length, int bits)
	return full_name_hash(NULL, buf, length) >> (32 - bits);
}

#endif /* __KERNEL__ */

#endif /* _LINUX_SUNRPC_SVCAUTH_H_ */

@@ -9,7 +9,6 @@
#ifndef _LINUX_SUNRPC_SVCAUTH_GSS_H
#define _LINUX_SUNRPC_SVCAUTH_GSS_H

#ifdef __KERNEL__
#include <linux/sched.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>

@@ -24,5 +23,4 @@ void gss_svc_shutdown_net(struct net *net);
int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
u32 svcauth_gss_flavor(struct auth_domain *dom);

#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */

@@ -11,8 +11,6 @@
#ifndef _SUNRPC_XDR_H_
#define _SUNRPC_XDR_H_

#ifdef __KERNEL__

#include <linux/uio.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

@@ -552,6 +550,5 @@ xdr_stream_decode_uint32_array(struct xdr_stream *xdr,
		*array = be32_to_cpup(p);
	return retval;
}
#endif /* __KERNEL__ */

#endif /* _SUNRPC_XDR_H_ */

@@ -19,8 +19,6 @@
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

#ifdef __KERNEL__

#define RPC_MIN_SLOT_TABLE (2U)
#define RPC_DEF_SLOT_TABLE (16U)
#define RPC_MAX_SLOT_TABLE_LIMIT (65536U)

@@ -506,6 +504,4 @@ static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
}
#endif

#endif /* __KERNEL__*/

#endif /* _LINUX_SUNRPC_XPRT_H */

@@ -8,8 +8,6 @@
#ifndef _LINUX_SUNRPC_XPRTSOCK_H
#define _LINUX_SUNRPC_XPRTSOCK_H

#ifdef __KERNEL__

int init_socket_xprt(void);
void cleanup_socket_xprt(void);

@@ -91,6 +89,4 @@ struct sock_xprt {
#define XPRT_SOCK_WAKE_PENDING (6)
#define XPRT_SOCK_WAKE_DISCONNECT (7)

#endif /* __KERNEL__ */

#endif /* _LINUX_SUNRPC_XPRTSOCK_H */
@@ -126,6 +126,34 @@ DEFINE_GSSAPI_EVENT(verify_mic);
DEFINE_GSSAPI_EVENT(wrap);
DEFINE_GSSAPI_EVENT(unwrap);

TRACE_EVENT(rpcgss_accept_upcall,
	TP_PROTO(
		__be32 xid,
		u32 major_status,
		u32 minor_status
	),

	TP_ARGS(xid, major_status, minor_status),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, minor_status)
		__field(unsigned long, major_status)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
		__entry->minor_status = minor_status;
		__entry->major_status = major_status;
	),

	TP_printk("xid=0x%08x major_status=%s (0x%08lx) minor_status=%u",
		__entry->xid, __entry->major_status == 0 ? "GSS_S_COMPLETE" :
		show_gss_status(__entry->major_status),
		__entry->major_status, __entry->minor_status
	)
);

/**
 ** GSS auth unwrap failures

@@ -355,6 +383,23 @@ TRACE_EVENT(rpcgss_createauth,
		show_pseudoflavor(__entry->flavor), __entry->error)
);

TRACE_EVENT(rpcgss_oid_to_mech,
	TP_PROTO(
		const char *oid
	),

	TP_ARGS(oid),

	TP_STRUCT__entry(
		__string(oid, oid)
	),

	TP_fast_assign(
		__assign_str(oid, oid);
	),

	TP_printk("mech for oid %s was not found", __get_str(oid))
);

#endif /* _TRACE_RPCGSS_H */
@@ -1564,31 +1564,47 @@ DEFINE_ERROR_EVENT(chunk);
 ** Server-side RDMA API events
 **/

TRACE_EVENT(svcrdma_dma_map_page,
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const void *page
		u64 dma_addr,
		u32 length
	),

	TP_ARGS(rdma, page),
	TP_ARGS(rdma, dma_addr, length),

	TP_STRUCT__entry(
		__field(const void *, page);
		__field(u64, dma_addr)
		__field(u32, length)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->page = page;
		__entry->dma_addr = dma_addr;
		__entry->length = length;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s page=%p",
		__get_str(addr), __get_str(device), __entry->page
	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
		__get_str(addr), __get_str(device),
		__entry->dma_addr, __entry->length
	)
);

#define DEFINE_SVC_DMA_EVENT(name) \
		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name, \
				TP_PROTO( \
					const struct svcxprt_rdma *rdma, \
					u64 dma_addr, \
					u32 length \
				), \
				TP_ARGS(rdma, dma_addr, length))

DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);

TRACE_EVENT(svcrdma_dma_map_rwctx,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
@@ -14,6 +14,26 @@
#include <linux/net.h>
#include <linux/tracepoint.h>

TRACE_DEFINE_ENUM(RPC_AUTH_OK);
TRACE_DEFINE_ENUM(RPC_AUTH_BADCRED);
TRACE_DEFINE_ENUM(RPC_AUTH_REJECTEDCRED);
TRACE_DEFINE_ENUM(RPC_AUTH_BADVERF);
TRACE_DEFINE_ENUM(RPC_AUTH_REJECTEDVERF);
TRACE_DEFINE_ENUM(RPC_AUTH_TOOWEAK);
TRACE_DEFINE_ENUM(RPCSEC_GSS_CREDPROBLEM);
TRACE_DEFINE_ENUM(RPCSEC_GSS_CTXPROBLEM);

#define rpc_show_auth_stat(status) \
	__print_symbolic(status, \
		{ RPC_AUTH_OK, "AUTH_OK" }, \
		{ RPC_AUTH_BADCRED, "BADCRED" }, \
		{ RPC_AUTH_REJECTEDCRED, "REJECTEDCRED" }, \
		{ RPC_AUTH_BADVERF, "BADVERF" }, \
		{ RPC_AUTH_REJECTEDVERF, "REJECTEDVERF" }, \
		{ RPC_AUTH_TOOWEAK, "TOOWEAK" }, \
		{ RPCSEC_GSS_CREDPROBLEM, "GSS_CREDPROBLEM" }, \
		{ RPCSEC_GSS_CTXPROBLEM, "GSS_CTXPROBLEM" }) \

DECLARE_EVENT_CLASS(rpc_task_status,

	TP_PROTO(const struct rpc_task *task),

@@ -960,6 +980,41 @@ TRACE_EVENT(svc_recv,
		show_rqstp_flags(__entry->flags))
);

#define svc_show_status(status) \
	__print_symbolic(status, \
		{ SVC_GARBAGE, "SVC_GARBAGE" }, \
		{ SVC_SYSERR, "SVC_SYSERR" }, \
		{ SVC_VALID, "SVC_VALID" }, \
		{ SVC_NEGATIVE, "SVC_NEGATIVE" }, \
		{ SVC_OK, "SVC_OK" }, \
		{ SVC_DROP, "SVC_DROP" }, \
		{ SVC_CLOSE, "SVC_CLOSE" }, \
		{ SVC_DENIED, "SVC_DENIED" }, \
		{ SVC_PENDING, "SVC_PENDING" }, \
		{ SVC_COMPLETE, "SVC_COMPLETE" })

TRACE_EVENT(svc_authenticate,
	TP_PROTO(const struct svc_rqst *rqst, int auth_res, __be32 auth_stat),

	TP_ARGS(rqst, auth_res, auth_stat),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(unsigned long, svc_status)
		__field(unsigned long, auth_stat)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->svc_status = auth_res;
		__entry->auth_stat = be32_to_cpu(auth_stat);
	),

	TP_printk("xid=0x%08x auth_res=%s auth_stat=%s",
		__entry->xid, svc_show_status(__entry->svc_status),
		rpc_show_auth_stat(__entry->auth_stat))
);

TRACE_EVENT(svc_process,
	TP_PROTO(const struct svc_rqst *rqst, const char *name),
@@ -20,6 +20,7 @@
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/sunrpc/clnt.h>
#include <trace/events/rpcgss.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_AUTH

@@ -158,7 +159,6 @@ struct gss_api_mech *gss_mech_get_by_OID(struct rpcsec_gss_oid *obj)

	if (sprint_oid(obj->data, obj->len, buf, sizeof(buf)) < 0)
		return NULL;
	dprintk("RPC: %s(%s)\n", __func__, buf);
	request_module("rpc-auth-gss-%s", buf);

	rcu_read_lock();

@@ -172,6 +172,8 @@ struct gss_api_mech *gss_mech_get_by_OID(struct rpcsec_gss_oid *obj)
		}
	}
	rcu_read_unlock();
	if (!gm)
		trace_rpcgss_oid_to_mech(buf);
	return gm;
}
@@ -49,6 +49,9 @@
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/cache.h>

#include <trace/events/rpcgss.h>

#include "gss_rpc_upcall.h"

@@ -1075,24 +1078,32 @@ gss_read_verf(struct rpc_gss_wire_cred *gc,
	return 0;
}

/* Ok this is really heavily depending on a set of semantics in
 * how rqstp is set up by svc_recv and pages laid down by the
 * server when reading a request. We are basically guaranteed that
 * the token lays all down linearly across a set of pages, starting
 * at iov_base in rq_arg.head[0] which happens to be the first of a
 * set of pages stored in rq_pages[].
 * rq_arg.head[0].iov_base will provide us the page_base to pass
 * to the upcall.
 */
static inline int
gss_read_proxy_verf(struct svc_rqst *rqstp,
		struct rpc_gss_wire_cred *gc, __be32 *authp,
		struct xdr_netobj *in_handle,
		struct gssp_in_token *in_token)
static void gss_free_in_token_pages(struct gssp_in_token *in_token)
{
	u32 inlen;
	int i;

	i = 0;
	inlen = in_token->page_len;
	while (inlen) {
		if (in_token->pages[i])
			put_page(in_token->pages[i]);
		inlen -= inlen > PAGE_SIZE ? PAGE_SIZE : inlen;
	}

	kfree(in_token->pages);
	in_token->pages = NULL;
}

static int gss_read_proxy_verf(struct svc_rqst *rqstp,
			       struct rpc_gss_wire_cred *gc, __be32 *authp,
			       struct xdr_netobj *in_handle,
			       struct gssp_in_token *in_token)
{
	struct kvec *argv = &rqstp->rq_arg.head[0];
	u32 inlen;
	int res;
	unsigned int page_base, length;
	int pages, i, res;
	size_t inlen;

	res = gss_read_common_verf(gc, argv, authp, in_handle);
	if (res)

@@ -1102,10 +1113,36 @@ gss_read_proxy_verf(struct svc_rqst *rqstp,
	if (inlen > (argv->iov_len + rqstp->rq_arg.page_len))
		return SVC_DENIED;

	in_token->pages = rqstp->rq_pages;
	in_token->page_base = (ulong)argv->iov_base & ~PAGE_MASK;
	pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
	in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
	if (!in_token->pages)
		return SVC_DENIED;
	in_token->page_base = 0;
	in_token->page_len = inlen;
	for (i = 0; i < pages; i++) {
		in_token->pages[i] = alloc_page(GFP_KERNEL);
		if (!in_token->pages[i]) {
			gss_free_in_token_pages(in_token);
			return SVC_DENIED;
		}
	}

	length = min_t(unsigned int, inlen, argv->iov_len);
	memcpy(page_address(in_token->pages[0]), argv->iov_base, length);
	inlen -= length;

	i = 1;
	page_base = rqstp->rq_arg.page_base;
	while (inlen) {
		length = min_t(unsigned int, inlen, PAGE_SIZE);
		memcpy(page_address(in_token->pages[i]),
		       page_address(rqstp->rq_arg.pages[i]) + page_base,
		       length);

		inlen -= length;
		page_base = 0;
		i++;
	}
	return 0;
}

@@ -1270,9 +1307,8 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
	if (status)
		goto out;

	dprintk("RPC: svcauth_gss: gss major status = %d "
		"minor status = %d\n",
		ud.major_status, ud.minor_status);
	trace_rpcgss_accept_upcall(rqstp->rq_xid, ud.major_status,
				   ud.minor_status);

	switch (ud.major_status) {
	case GSS_S_CONTINUE_NEEDED:

@@ -1280,8 +1316,11 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
		break;
	case GSS_S_COMPLETE:
		status = gss_proxy_save_rsc(sn->rsc_cache, &ud, &handle);
		if (status)
		if (status) {
			pr_info("%s: gss_proxy_save_rsc failed (%d)\n",
				__func__, status);
			goto out;
		}
		cli_handle.data = (u8 *)&handle;
		cli_handle.len = sizeof(handle);
		break;

@@ -1292,15 +1331,20 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,

	/* Got an answer to the upcall; use it: */
	if (gss_write_init_verf(sn->rsc_cache, rqstp,
				&cli_handle, &ud.major_status))
				&cli_handle, &ud.major_status)) {
		pr_info("%s: gss_write_init_verf failed\n", __func__);
		goto out;
	}
	if (gss_write_resv(resv, PAGE_SIZE,
			   &cli_handle, &ud.out_token,
			   ud.major_status, ud.minor_status))
			   ud.major_status, ud.minor_status)) {
		pr_info("%s: gss_write_resv failed\n", __func__);
		goto out;
	}

	ret = SVC_COMPLETE;
out:
	gss_free_in_token_pages(&ud.in_token);
	gssp_free_upcall_data(&ud);
	return ret;
}
@@ -53,9 +53,6 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
	h->last_refresh = now;
}

static inline int cache_is_valid(struct cache_head *h);
static void cache_fresh_locked(struct cache_head *head, time_t expiry,
				struct cache_detail *detail);
static void cache_fresh_unlocked(struct cache_head *head,
				struct cache_detail *detail);

@@ -105,9 +102,6 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
		if (cache_is_expired(detail, tmp)) {
			hlist_del_init_rcu(&tmp->cache_list);
			detail->entries --;
			if (cache_is_valid(tmp) == -EAGAIN)
				set_bit(CACHE_NEGATIVE, &tmp->flags);
			cache_fresh_locked(tmp, 0, detail);
			freeme = tmp;
			break;
		}
@@ -1337,6 +1337,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
		auth_stat = rpc_autherr_badcred;
		auth_res = progp->pg_authenticate(rqstp);
	}
	if (auth_res != SVC_OK)
		trace_svc_authenticate(rqstp, auth_res, auth_stat);
	switch (auth_res) {
	case SVC_OK:
		break;
@@ -19,6 +19,8 @@
#include <linux/err.h>
#include <linux/hash.h>

#include <trace/events/sunrpc.h>

#define RPCDBG_FACILITY RPCDBG_AUTH
@@ -195,6 +195,7 @@ rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst)
	pr_info("%s: %*ph\n", __func__, 64, rqst->rq_buffer);
#endif

	rqst->rq_xtime = ktime_get();
	rc = svc_rdma_bc_sendto(rdma, rqst, ctxt);
	if (rc) {
		svc_rdma_send_ctxt_put(rdma, ctxt);
@@ -233,11 +233,15 @@ void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++)
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);
		trace_svcrdma_dma_unmap_page(rdma,
					     ctxt->sc_sges[i].addr,
					     ctxt->sc_sges[i].length);
	}

	for (i = 0; i < ctxt->sc_page_count; ++i)
		put_page(ctxt->sc_pages[i]);

@@ -490,6 +494,7 @@ static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
	dma_addr_t dma_addr;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	trace_svcrdma_dma_map_page(rdma, dma_addr, len);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

@@ -499,7 +504,6 @@ static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
	return 0;

out_maperr:
	trace_svcrdma_dma_map_page(rdma, page);
	return -EIO;
}
@@ -2659,6 +2659,8 @@ static int bc_sendto(struct rpc_rqst *req)
		.iov_len = sizeof(marker),
	};

	req->rq_xtime = ktime_get();

	len = kernel_sendmsg(transport->sock, &msg, &iov, 1, iov.iov_len);
	if (len != iov.iov_len)
		return -EAGAIN;

@@ -2684,7 +2686,6 @@ static int bc_send_request(struct rpc_rqst *req)
	struct svc_xprt *xprt;
	int len;

	dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
	/*
	 * Get the server socket associated with this callback xprt
	 */