Commit 59f0e7eb authored by Linus Torvalds

Merge tag 'nfs-for-5.10-1' of git://git.linux-nfs.org/projects/anna/linux-nfs

Pull NFS client updates from Anna Schumaker:
 "Stable Fixes:
   - Wait for stateid updates after CLOSE/OPEN_DOWNGRADE # v5.4+
   - Fix nfs_path in case of a rename retry
   - Support the EXCHGID4_FLAG_SUPP_FENCE_OPS v4.2 EXCHANGE_ID flag

  New features and improvements:
   - Replace dprintk() calls with tracepoints
   - Make cache consistency bitmap dynamic
   - Added support for the NFS v4.2 READ_PLUS operation
   - Improvements to net namespace uniquifier

  Other bugfixes and cleanups:
   - Remove redundant clnt pointer
   - Don't update timeout values on connection resets
   - Remove redundant tracepoints
   - Various cleanups to comments
   - Fix oops when trying to use copy_file_range with v4.0 source server
   - Improvements to flexfiles mirrors
   - Add missing 'local_lock=posix' mount option"

* tag 'nfs-for-5.10-1' of git://git.linux-nfs.org/projects/anna/linux-nfs: (55 commits)
  NFSv4.2: support EXCHGID4_FLAG_SUPP_FENCE_OPS 4.2 EXCHANGE_ID flag
  NFSv4: Fix up RCU annotations for struct nfs_netns_client
  NFS: Only reference user namespace from nfs4idmap struct instead of cred
  nfs: add missing "posix" local_lock constant table definition
  NFSv4: Use the net namespace uniquifier if it is set
  NFSv4: Clean up initialisation of uniquified client id strings
  NFS: Decode a full READ_PLUS reply
  SUNRPC: Add an xdr_align_data() function
  NFS: Add READ_PLUS hole segment decoding
  SUNRPC: Add the ability to expand holes in data pages
  SUNRPC: Split out _shift_data_right_tail()
  SUNRPC: Split out xdr_realign_pages() from xdr_align_pages()
  NFS: Add READ_PLUS data segment support
  NFS: Use xdr_page_pos() in NFSv4 decode_getacl()
  SUNRPC: Implement a xdr_page_pos() function
  SUNRPC: Split out a function for setting current page
  NFS: fix nfs_path in case of a rename retry
  fs: nfs: return per memcg count for xattr shrinkers
  NFSv4: Wait for stateid updates after CLOSE/OPEN_DOWNGRADE
  nfs: remove incorrect fallthrough label
  ...
parents 4962a856 8c39076c
...@@ -417,7 +417,7 @@ void nsm_release(struct nsm_handle *nsm) ...@@ -417,7 +417,7 @@ void nsm_release(struct nsm_handle *nsm)
/* /*
* XDR functions for NSM. * XDR functions for NSM.
* *
* See http://www.opengroup.org/ for details on the Network * See https://www.opengroup.org/ for details on the Network
* Status Monitor wire protocol. * Status Monitor wire protocol.
*/ */
......
...@@ -94,6 +94,7 @@ enum { ...@@ -94,6 +94,7 @@ enum {
static const struct constant_table nfs_param_enums_local_lock[] = { static const struct constant_table nfs_param_enums_local_lock[] = {
{ "all", Opt_local_lock_all }, { "all", Opt_local_lock_all },
{ "flock", Opt_local_lock_flock }, { "flock", Opt_local_lock_flock },
{ "posix", Opt_local_lock_posix },
{ "none", Opt_local_lock_none }, { "none", Opt_local_lock_none },
{} {}
}; };
......
...@@ -32,9 +32,9 @@ int nfs_mountpoint_expiry_timeout = 500 * HZ; ...@@ -32,9 +32,9 @@ int nfs_mountpoint_expiry_timeout = 500 * HZ;
/* /*
* nfs_path - reconstruct the path given an arbitrary dentry * nfs_path - reconstruct the path given an arbitrary dentry
* @base - used to return pointer to the end of devname part of path * @base - used to return pointer to the end of devname part of path
* @dentry - pointer to dentry * @dentry_in - pointer to dentry
* @buffer - result buffer * @buffer - result buffer
* @buflen - length of buffer * @buflen_in - length of buffer
* @flags - options (see below) * @flags - options (see below)
* *
* Helper function for constructing the server pathname * Helper function for constructing the server pathname
...@@ -49,15 +49,19 @@ int nfs_mountpoint_expiry_timeout = 500 * HZ; ...@@ -49,15 +49,19 @@ int nfs_mountpoint_expiry_timeout = 500 * HZ;
* the original device (export) name * the original device (export) name
* (if unset, the original name is returned verbatim) * (if unset, the original name is returned verbatim)
*/ */
char *nfs_path(char **p, struct dentry *dentry, char *buffer, ssize_t buflen, char *nfs_path(char **p, struct dentry *dentry_in, char *buffer,
unsigned flags) ssize_t buflen_in, unsigned flags)
{ {
char *end; char *end;
int namelen; int namelen;
unsigned seq; unsigned seq;
const char *base; const char *base;
struct dentry *dentry;
ssize_t buflen;
rename_retry: rename_retry:
buflen = buflen_in;
dentry = dentry_in;
end = buffer+buflen; end = buffer+buflen;
*--end = '\0'; *--end = '\0';
buflen--; buflen--;
......
...@@ -67,7 +67,6 @@ struct nfs4_xattr_bucket { ...@@ -67,7 +67,6 @@ struct nfs4_xattr_bucket {
struct nfs4_xattr_cache { struct nfs4_xattr_cache {
struct kref ref; struct kref ref;
spinlock_t hash_lock; /* protects hashtable and lru */
struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE]; struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE];
struct list_head lru; struct list_head lru;
struct list_head dispose; struct list_head dispose;
...@@ -882,7 +881,7 @@ nfs4_xattr_cache_count(struct shrinker *shrink, struct shrink_control *sc) ...@@ -882,7 +881,7 @@ nfs4_xattr_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{ {
unsigned long count; unsigned long count;
count = list_lru_count(&nfs4_xattr_cache_lru); count = list_lru_shrink_count(&nfs4_xattr_cache_lru, sc);
return vfs_pressure_ratio(count); return vfs_pressure_ratio(count);
} }
...@@ -976,7 +975,7 @@ nfs4_xattr_entry_count(struct shrinker *shrink, struct shrink_control *sc) ...@@ -976,7 +975,7 @@ nfs4_xattr_entry_count(struct shrinker *shrink, struct shrink_control *sc)
lru = (shrink == &nfs4_xattr_large_entry_shrinker) ? lru = (shrink == &nfs4_xattr_large_entry_shrinker) ?
&nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru; &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;
count = list_lru_count(lru); count = list_lru_shrink_count(lru, sc);
return vfs_pressure_ratio(count); return vfs_pressure_ratio(count);
} }
......
...@@ -45,6 +45,15 @@ ...@@ -45,6 +45,15 @@
#define encode_deallocate_maxsz (op_encode_hdr_maxsz + \ #define encode_deallocate_maxsz (op_encode_hdr_maxsz + \
encode_fallocate_maxsz) encode_fallocate_maxsz)
#define decode_deallocate_maxsz (op_decode_hdr_maxsz) #define decode_deallocate_maxsz (op_decode_hdr_maxsz)
#define encode_read_plus_maxsz (op_encode_hdr_maxsz + \
encode_stateid_maxsz + 3)
#define NFS42_READ_PLUS_SEGMENT_SIZE (1 /* data_content4 */ + \
2 /* data_info4.di_offset */ + \
2 /* data_info4.di_length */)
#define decode_read_plus_maxsz (op_decode_hdr_maxsz + \
1 /* rpr_eof */ + \
1 /* rpr_contents count */ + \
2 * NFS42_READ_PLUS_SEGMENT_SIZE)
#define encode_seek_maxsz (op_encode_hdr_maxsz + \ #define encode_seek_maxsz (op_encode_hdr_maxsz + \
encode_stateid_maxsz + \ encode_stateid_maxsz + \
2 /* offset */ + \ 2 /* offset */ + \
...@@ -128,6 +137,14 @@ ...@@ -128,6 +137,14 @@
decode_putfh_maxsz + \ decode_putfh_maxsz + \
decode_deallocate_maxsz + \ decode_deallocate_maxsz + \
decode_getattr_maxsz) decode_getattr_maxsz)
#define NFS4_enc_read_plus_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_read_plus_maxsz)
#define NFS4_dec_read_plus_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_read_plus_maxsz)
#define NFS4_enc_seek_sz (compound_encode_hdr_maxsz + \ #define NFS4_enc_seek_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \ encode_sequence_maxsz + \
encode_putfh_maxsz + \ encode_putfh_maxsz + \
...@@ -324,6 +341,16 @@ static void encode_deallocate(struct xdr_stream *xdr, ...@@ -324,6 +341,16 @@ static void encode_deallocate(struct xdr_stream *xdr,
encode_fallocate(xdr, args); encode_fallocate(xdr, args);
} }
static void encode_read_plus(struct xdr_stream *xdr,
const struct nfs_pgio_args *args,
struct compound_hdr *hdr)
{
encode_op_hdr(xdr, OP_READ_PLUS, decode_read_plus_maxsz, hdr);
encode_nfs4_stateid(xdr, &args->stateid);
encode_uint64(xdr, args->offset);
encode_uint32(xdr, args->count);
}
static void encode_seek(struct xdr_stream *xdr, static void encode_seek(struct xdr_stream *xdr,
const struct nfs42_seek_args *args, const struct nfs42_seek_args *args,
struct compound_hdr *hdr) struct compound_hdr *hdr)
...@@ -722,6 +749,28 @@ static void nfs4_xdr_enc_deallocate(struct rpc_rqst *req, ...@@ -722,6 +749,28 @@ static void nfs4_xdr_enc_deallocate(struct rpc_rqst *req,
encode_nops(&hdr); encode_nops(&hdr);
} }
/*
* Encode READ_PLUS request
*/
static void nfs4_xdr_enc_read_plus(struct rpc_rqst *req,
struct xdr_stream *xdr,
const void *data)
{
const struct nfs_pgio_args *args = data;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_read_plus(xdr, args, &hdr);
rpc_prepare_reply_pages(req, args->pages, args->pgbase,
args->count, hdr.replen);
encode_nops(&hdr);
}
/* /*
* Encode SEEK request * Encode SEEK request
*/ */
...@@ -970,6 +1019,97 @@ static int decode_deallocate(struct xdr_stream *xdr, struct nfs42_falloc_res *re ...@@ -970,6 +1019,97 @@ static int decode_deallocate(struct xdr_stream *xdr, struct nfs42_falloc_res *re
return decode_op_hdr(xdr, OP_DEALLOCATE); return decode_op_hdr(xdr, OP_DEALLOCATE);
} }
static int decode_read_plus_data(struct xdr_stream *xdr, struct nfs_pgio_res *res,
uint32_t *eof)
{
uint32_t count, recvd;
uint64_t offset;
__be32 *p;
p = xdr_inline_decode(xdr, 8 + 4);
if (unlikely(!p))
return -EIO;
p = xdr_decode_hyper(p, &offset);
count = be32_to_cpup(p);
recvd = xdr_align_data(xdr, res->count, count);
res->count += recvd;
if (count > recvd) {
dprintk("NFS: server cheating in read reply: "
"count %u > recvd %u\n", count, recvd);
*eof = 0;
return 1;
}
return 0;
}
static int decode_read_plus_hole(struct xdr_stream *xdr, struct nfs_pgio_res *res,
uint32_t *eof)
{
uint64_t offset, length, recvd;
__be32 *p;
p = xdr_inline_decode(xdr, 8 + 8);
if (unlikely(!p))
return -EIO;
p = xdr_decode_hyper(p, &offset);
p = xdr_decode_hyper(p, &length);
recvd = xdr_expand_hole(xdr, res->count, length);
res->count += recvd;
if (recvd < length) {
*eof = 0;
return 1;
}
return 0;
}
static int decode_read_plus(struct xdr_stream *xdr, struct nfs_pgio_res *res)
{
uint32_t eof, segments, type;
int status, i;
__be32 *p;
status = decode_op_hdr(xdr, OP_READ_PLUS);
if (status)
return status;
p = xdr_inline_decode(xdr, 4 + 4);
if (unlikely(!p))
return -EIO;
eof = be32_to_cpup(p++);
segments = be32_to_cpup(p++);
if (segments == 0)
goto out;
for (i = 0; i < segments; i++) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
return -EIO;
type = be32_to_cpup(p++);
if (type == NFS4_CONTENT_DATA)
status = decode_read_plus_data(xdr, res, &eof);
else if (type == NFS4_CONTENT_HOLE)
status = decode_read_plus_hole(xdr, res, &eof);
else
return -EINVAL;
if (status < 0)
return status;
if (status > 0)
break;
}
out:
res->eof = eof;
return 0;
}
static int decode_seek(struct xdr_stream *xdr, struct nfs42_seek_res *res) static int decode_seek(struct xdr_stream *xdr, struct nfs42_seek_res *res)
{ {
int status; int status;
...@@ -1146,6 +1286,33 @@ static int nfs4_xdr_dec_deallocate(struct rpc_rqst *rqstp, ...@@ -1146,6 +1286,33 @@ static int nfs4_xdr_dec_deallocate(struct rpc_rqst *rqstp,
return status; return status;
} }
/*
* Decode READ_PLUS request
*/
static int nfs4_xdr_dec_read_plus(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
void *data)
{
struct nfs_pgio_res *res = data;
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_read_plus(xdr, res);
if (!status)
status = res->count;
out:
return status;
}
/* /*
* Decode SEEK request * Decode SEEK request
*/ */
......
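The decode path above follows the READ_PLUS result layout from RFC 7862: an eof flag and a segment count, then for each segment a content type followed by either offset + count + opaque data (DATA) or offset + length (HOLE). As an illustration only (standalone userspace sketch over a hand-built sample buffer, not kernel code), the program below walks that same layout the way decode_read_plus() does; all numeric values in the sample buffer are invented for the example.

/*
 * Userspace sketch of the READ_PLUS result layout consumed by
 * decode_read_plus() above (after the op header): a 32-bit eof flag,
 * a 32-bit segment count, then per segment a 32-bit content type
 * followed by offset + count + opaque data (DATA) or offset + length
 * (HOLE). NFS4_CONTENT_* values are taken from RFC 7862.
 */
#include <stdint.h>
#include <stdio.h>

#define NFS4_CONTENT_DATA 0
#define NFS4_CONTENT_HOLE 1

static uint32_t get32(const uint8_t **p)
{
    uint32_t v = ((uint32_t)(*p)[0] << 24) | ((uint32_t)(*p)[1] << 16) |
                 ((uint32_t)(*p)[2] << 8)  |  (uint32_t)(*p)[3];
    *p += 4;
    return v;
}

static uint64_t get64(const uint8_t **p)
{
    uint64_t hi = get32(p);
    return (hi << 32) | get32(p);
}

int main(void)
{
    /* eof=1, two segments: DATA(offset=0, count=4, "data"), HOLE(offset=4, length=4096) */
    static const uint8_t reply[] = {
        0,0,0,1,            /* rpr_eof */
        0,0,0,2,            /* segment count */
        0,0,0,0,            /* NFS4_CONTENT_DATA */
        0,0,0,0, 0,0,0,0,   /* di_offset = 0 */
        0,0,0,4,            /* data length */
        'd','a','t','a',    /* opaque data (XDR-padded to 4 bytes) */
        0,0,0,1,            /* NFS4_CONTENT_HOLE */
        0,0,0,0, 0,0,0,4,   /* di_offset = 4 */
        0,0,0,0, 0,0,16,0,  /* di_length = 4096 */
    };
    const uint8_t *p = reply;
    uint32_t eof = get32(&p);
    uint32_t segments = get32(&p);

    for (uint32_t i = 0; i < segments; i++) {
        uint32_t type = get32(&p);
        if (type == NFS4_CONTENT_DATA) {
            uint64_t offset = get64(&p);
            uint32_t count = get32(&p);
            printf("DATA offset=%llu count=%u\n",
                   (unsigned long long)offset, count);
            p += (count + 3) & ~3u;   /* skip XDR-padded payload */
        } else if (type == NFS4_CONTENT_HOLE) {
            uint64_t offset = get64(&p);
            uint64_t length = get64(&p);
            printf("HOLE offset=%llu length=%llu\n",
                   (unsigned long long)offset, (unsigned long long)length);
        } else {
            fprintf(stderr, "unknown segment type %u\n", type);
            return 1;
        }
    }
    printf("eof=%u\n", eof);
    return 0;
}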
...@@ -599,6 +599,14 @@ static inline bool nfs4_stateid_is_newer(const nfs4_stateid *s1, const nfs4_stat ...@@ -599,6 +599,14 @@ static inline bool nfs4_stateid_is_newer(const nfs4_stateid *s1, const nfs4_stat
return (s32)(be32_to_cpu(s1->seqid) - be32_to_cpu(s2->seqid)) > 0; return (s32)(be32_to_cpu(s1->seqid) - be32_to_cpu(s2->seqid)) > 0;
} }
static inline bool nfs4_stateid_is_next(const nfs4_stateid *s1, const nfs4_stateid *s2)
{
u32 seq1 = be32_to_cpu(s1->seqid);
u32 seq2 = be32_to_cpu(s2->seqid);
return seq2 == seq1 + 1U || (seq2 == 1U && seq1 == 0xffffffffU);
}
static inline bool nfs4_stateid_match_or_older(const nfs4_stateid *dst, const nfs4_stateid *src) static inline bool nfs4_stateid_match_or_older(const nfs4_stateid *dst, const nfs4_stateid *src)
{ {
return nfs4_stateid_match_other(dst, src) && return nfs4_stateid_match_other(dst, src) &&
......
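Since the wraparound case in nfs4_stateid_is_next() above is easy to misread, here is a minimal standalone restatement of the predicate with a few illustrative checks. It assumes, as the helper does, that a valid stateid seqid continues at 1 after 0xffffffff; the test values are invented for the example and are not taken from the kernel.

/*
 * Standalone check of the seqid-increment rule encoded by
 * nfs4_stateid_is_next(): the next seqid is seq1 + 1, and on 32-bit
 * wraparound the sequence continues at 1 rather than 0.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int seqid_is_next(uint32_t seq1, uint32_t seq2)
{
    return seq2 == seq1 + 1U || (seq2 == 1U && seq1 == 0xffffffffU);
}

int main(void)
{
    assert(seqid_is_next(1, 2));            /* ordinary increment */
    assert(seqid_is_next(0xffffffffU, 1));  /* wrap: sequence continues at 1 */
    assert(!seqid_is_next(5, 7));           /* gap: an update was missed */
    assert(!seqid_is_next(3, 3));           /* the same seqid is not "next" */
    printf("seqid successor checks passed\n");
    return 0;
}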
...@@ -1045,6 +1045,8 @@ static int nfs4_server_common_setup(struct nfs_server *server, ...@@ -1045,6 +1045,8 @@ static int nfs4_server_common_setup(struct nfs_server *server,
server->caps |= server->nfs_client->cl_mvops->init_caps; server->caps |= server->nfs_client->cl_mvops->init_caps;
if (server->flags & NFS_MOUNT_NORDIRPLUS) if (server->flags & NFS_MOUNT_NORDIRPLUS)
server->caps &= ~NFS_CAP_READDIRPLUS; server->caps &= ~NFS_CAP_READDIRPLUS;
if (server->nfs_client->cl_proto == XPRT_TRANSPORT_RDMA)
server->caps &= ~NFS_CAP_READ_PLUS;
/* /*
* Don't use NFS uid/gid mapping if we're using AUTH_SYS or lower * Don't use NFS uid/gid mapping if we're using AUTH_SYS or lower
* authentication. * authentication.
......
...@@ -145,7 +145,8 @@ static ssize_t __nfs4_copy_file_range(struct file *file_in, loff_t pos_in, ...@@ -145,7 +145,8 @@ static ssize_t __nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
/* Only offload copy if superblock is the same */ /* Only offload copy if superblock is the same */
if (file_in->f_op != &nfs4_file_operations) if (file_in->f_op != &nfs4_file_operations)
return -EXDEV; return -EXDEV;
if (!nfs_server_capable(file_inode(file_out), NFS_CAP_COPY)) if (!nfs_server_capable(file_inode(file_out), NFS_CAP_COPY) ||
!nfs_server_capable(file_inode(file_in), NFS_CAP_COPY))
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (file_inode(file_in) == file_inode(file_out)) if (file_inode(file_in) == file_inode(file_out))
return -EOPNOTSUPP; return -EOPNOTSUPP;
......
...@@ -46,6 +46,7 @@ ...@@ -46,6 +46,7 @@
#include <keys/user-type.h> #include <keys/user-type.h>
#include <keys/request_key_auth-type.h> #include <keys/request_key_auth-type.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/user_namespace.h>
#include "internal.h" #include "internal.h"
#include "netns.h" #include "netns.h"
...@@ -69,13 +70,13 @@ struct idmap { ...@@ -69,13 +70,13 @@ struct idmap {
struct rpc_pipe *idmap_pipe; struct rpc_pipe *idmap_pipe;
struct idmap_legacy_upcalldata *idmap_upcall_data; struct idmap_legacy_upcalldata *idmap_upcall_data;
struct mutex idmap_mutex; struct mutex idmap_mutex;
const struct cred *cred; struct user_namespace *user_ns;
}; };
static struct user_namespace *idmap_userns(const struct idmap *idmap) static struct user_namespace *idmap_userns(const struct idmap *idmap)
{ {
if (idmap && idmap->cred) if (idmap && idmap->user_ns)
return idmap->cred->user_ns; return idmap->user_ns;
return &init_user_ns; return &init_user_ns;
} }
...@@ -286,7 +287,7 @@ static struct key *nfs_idmap_request_key(const char *name, size_t namelen, ...@@ -286,7 +287,7 @@ static struct key *nfs_idmap_request_key(const char *name, size_t namelen,
if (ret < 0) if (ret < 0)
return ERR_PTR(ret); return ERR_PTR(ret);
if (!idmap->cred || idmap->cred->user_ns == &init_user_ns) if (!idmap->user_ns || idmap->user_ns == &init_user_ns)
rkey = request_key(&key_type_id_resolver, desc, ""); rkey = request_key(&key_type_id_resolver, desc, "");
if (IS_ERR(rkey)) { if (IS_ERR(rkey)) {
mutex_lock(&idmap->idmap_mutex); mutex_lock(&idmap->idmap_mutex);
...@@ -462,7 +463,7 @@ nfs_idmap_new(struct nfs_client *clp) ...@@ -462,7 +463,7 @@ nfs_idmap_new(struct nfs_client *clp)
return -ENOMEM; return -ENOMEM;
mutex_init(&idmap->idmap_mutex); mutex_init(&idmap->idmap_mutex);
idmap->cred = get_cred(clp->cl_rpcclient->cl_cred); idmap->user_ns = get_user_ns(clp->cl_rpcclient->cl_cred->user_ns);
rpc_init_pipe_dir_object(&idmap->idmap_pdo, rpc_init_pipe_dir_object(&idmap->idmap_pdo,
&nfs_idmap_pipe_dir_object_ops, &nfs_idmap_pipe_dir_object_ops,
...@@ -486,7 +487,7 @@ nfs_idmap_new(struct nfs_client *clp) ...@@ -486,7 +487,7 @@ nfs_idmap_new(struct nfs_client *clp)
err_destroy_pipe: err_destroy_pipe:
rpc_destroy_pipe_data(idmap->idmap_pipe); rpc_destroy_pipe_data(idmap->idmap_pipe);
err: err:
put_cred(idmap->cred); put_user_ns(idmap->user_ns);
kfree(idmap); kfree(idmap);
return error; return error;
} }
...@@ -503,7 +504,7 @@ nfs_idmap_delete(struct nfs_client *clp) ...@@ -503,7 +504,7 @@ nfs_idmap_delete(struct nfs_client *clp)
&clp->cl_rpcclient->cl_pipedir_objects, &clp->cl_rpcclient->cl_pipedir_objects,
&idmap->idmap_pdo); &idmap->idmap_pdo);
rpc_destroy_pipe_data(idmap->idmap_pipe); rpc_destroy_pipe_data(idmap->idmap_pipe);
put_cred(idmap->cred); put_user_ns(idmap->user_ns);
kfree(idmap); kfree(idmap);
} }
......
...@@ -1511,6 +1511,7 @@ DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_setattr); ...@@ -1511,6 +1511,7 @@ DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_setattr);
DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_delegreturn); DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_delegreturn);
DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update); DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update);
DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update_wait); DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update_wait);
DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_close_stateid_update_wait);
DECLARE_EVENT_CLASS(nfs4_getattr_event, DECLARE_EVENT_CLASS(nfs4_getattr_event,
TP_PROTO( TP_PROTO(
......
...@@ -5308,7 +5308,6 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req, ...@@ -5308,7 +5308,6 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
uint32_t attrlen, uint32_t attrlen,
bitmap[3] = {0}; bitmap[3] = {0};
int status; int status;
unsigned int pg_offset;
res->acl_len = 0; res->acl_len = 0;
if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0) if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
...@@ -5316,9 +5315,6 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req, ...@@ -5316,9 +5315,6 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
xdr_enter_page(xdr, xdr->buf->page_len); xdr_enter_page(xdr, xdr->buf->page_len);
/* Calculate the offset of the page data */
pg_offset = xdr->buf->head[0].iov_len;
if ((status = decode_attr_bitmap(xdr, bitmap)) != 0) if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
goto out; goto out;
if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0) if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
...@@ -5331,7 +5327,7 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req, ...@@ -5331,7 +5327,7 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
/* The bitmap (xdr len + bitmaps) and the attr xdr len words /* The bitmap (xdr len + bitmaps) and the attr xdr len words
* are stored with the acl data to handle the problem of * are stored with the acl data to handle the problem of
* variable length bitmaps.*/ * variable length bitmaps.*/
res->acl_data_offset = xdr_stream_pos(xdr) - pg_offset; res->acl_data_offset = xdr_page_pos(xdr);
res->acl_len = attrlen; res->acl_len = attrlen;
/* Check for receive buffer overflow */ /* Check for receive buffer overflow */
...@@ -7619,6 +7615,7 @@ const struct rpc_procinfo nfs4_procedures[] = { ...@@ -7619,6 +7615,7 @@ const struct rpc_procinfo nfs4_procedures[] = {
PROC42(SETXATTR, enc_setxattr, dec_setxattr), PROC42(SETXATTR, enc_setxattr, dec_setxattr),
PROC42(LISTXATTRS, enc_listxattrs, dec_listxattrs), PROC42(LISTXATTRS, enc_listxattrs, dec_listxattrs),
PROC42(REMOVEXATTR, enc_removexattr, dec_removexattr), PROC42(REMOVEXATTR, enc_removexattr, dec_removexattr),
PROC42(READ_PLUS, enc_read_plus, dec_read_plus),
}; };
static unsigned int nfs_version4_counts[ARRAY_SIZE(nfs4_procedures)]; static unsigned int nfs_version4_counts[ARRAY_SIZE(nfs4_procedures)];
......
...@@ -902,7 +902,7 @@ pnfs_destroy_layouts_byclid(struct nfs_client *clp, ...@@ -902,7 +902,7 @@ pnfs_destroy_layouts_byclid(struct nfs_client *clp,
} }
/* /*
* Called by the state manger to remove all layouts established under an * Called by the state manager to remove all layouts established under an
* expired lease. * expired lease.
*/ */
void void
......
...@@ -889,7 +889,7 @@ static struct nfs_server *nfs_try_mount_request(struct fs_context *fc) ...@@ -889,7 +889,7 @@ static struct nfs_server *nfs_try_mount_request(struct fs_context *fc)
default: default:
if (rpcauth_get_gssinfo(flavor, &info) != 0) if (rpcauth_get_gssinfo(flavor, &info) != 0)
continue; continue;
/* Fallthrough */ break;
} }
dfprintk(MOUNT, "NFS: attempting to use auth flavor %u\n", flavor); dfprintk(MOUNT, "NFS: attempting to use auth flavor %u\n", flavor);
ctx->selected_flavor = flavor; ctx->selected_flavor = flavor;
......
...@@ -79,7 +79,12 @@ static ssize_t nfs_netns_identifier_show(struct kobject *kobj, ...@@ -79,7 +79,12 @@ static ssize_t nfs_netns_identifier_show(struct kobject *kobj,
struct nfs_netns_client *c = container_of(kobj, struct nfs_netns_client *c = container_of(kobj,
struct nfs_netns_client, struct nfs_netns_client,
kobject); kobject);
return scnprintf(buf, PAGE_SIZE, "%s\n", c->identifier); ssize_t ret;
rcu_read_lock();
ret = scnprintf(buf, PAGE_SIZE, "%s\n", rcu_dereference(c->identifier));
rcu_read_unlock();
return ret;
} }
/* Strip trailing '\n' */ /* Strip trailing '\n' */
...@@ -107,7 +112,7 @@ static ssize_t nfs_netns_identifier_store(struct kobject *kobj, ...@@ -107,7 +112,7 @@ static ssize_t nfs_netns_identifier_store(struct kobject *kobj,
p = kmemdup_nul(buf, len, GFP_KERNEL); p = kmemdup_nul(buf, len, GFP_KERNEL);
if (!p) if (!p)
return -ENOMEM; return -ENOMEM;
old = xchg(&c->identifier, p); old = rcu_dereference_protected(xchg(&c->identifier, (char __rcu *)p), 1);
if (old) { if (old) {
synchronize_rcu(); synchronize_rcu();
kfree(old); kfree(old);
...@@ -121,7 +126,7 @@ static void nfs_netns_client_release(struct kobject *kobj) ...@@ -121,7 +126,7 @@ static void nfs_netns_client_release(struct kobject *kobj)
struct nfs_netns_client, struct nfs_netns_client,
kobject); kobject);
kfree(c->identifier); kfree(rcu_dereference_raw(c->identifier));
kfree(c); kfree(c);
} }
......
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
struct nfs_netns_client { struct nfs_netns_client {
struct kobject kobject; struct kobject kobject;
struct net *net; struct net *net;
const char *identifier; const char __rcu *identifier;
}; };
extern struct kobject *nfs_client_kobj; extern struct kobject *nfs_client_kobj;
......
...@@ -551,13 +551,13 @@ enum { ...@@ -551,13 +551,13 @@ enum {
NFSPROC4_CLNT_LOOKUPP, NFSPROC4_CLNT_LOOKUPP,
NFSPROC4_CLNT_LAYOUTERROR, NFSPROC4_CLNT_LAYOUTERROR,
NFSPROC4_CLNT_COPY_NOTIFY, NFSPROC4_CLNT_COPY_NOTIFY,
NFSPROC4_CLNT_GETXATTR, NFSPROC4_CLNT_GETXATTR,
NFSPROC4_CLNT_SETXATTR, NFSPROC4_CLNT_SETXATTR,
NFSPROC4_CLNT_LISTXATTRS, NFSPROC4_CLNT_LISTXATTRS,
NFSPROC4_CLNT_REMOVEXATTR, NFSPROC4_CLNT_REMOVEXATTR,
NFSPROC4_CLNT_READ_PLUS,
}; };
/* nfs41 types */ /* nfs41 types */
......
...@@ -287,5 +287,6 @@ struct nfs_server { ...@@ -287,5 +287,6 @@ struct nfs_server {
#define NFS_CAP_LAYOUTERROR (1U << 26) #define NFS_CAP_LAYOUTERROR (1U << 26)
#define NFS_CAP_COPY_NOTIFY (1U << 27) #define NFS_CAP_COPY_NOTIFY (1U << 27)
#define NFS_CAP_XATTR (1U << 28) #define NFS_CAP_XATTR (1U << 28)
#define NFS_CAP_READ_PLUS (1U << 29)
#endif #endif
...@@ -525,7 +525,7 @@ struct nfs_closeargs { ...@@ -525,7 +525,7 @@ struct nfs_closeargs {
struct nfs_seqid * seqid; struct nfs_seqid * seqid;
fmode_t fmode; fmode_t fmode;
u32 share_access; u32 share_access;
const u32 * bitmask; u32 * bitmask;
struct nfs4_layoutreturn_args *lr_args; struct nfs4_layoutreturn_args *lr_args;
}; };
...@@ -608,7 +608,7 @@ struct nfs4_delegreturnargs { ...@@ -608,7 +608,7 @@ struct nfs4_delegreturnargs {
struct nfs4_sequence_args seq_args; struct nfs4_sequence_args seq_args;
const struct nfs_fh *fhandle; const struct nfs_fh *fhandle;
const nfs4_stateid *stateid; const nfs4_stateid *stateid;
const u32 * bitmask; u32 * bitmask;
struct nfs4_layoutreturn_args *lr_args; struct nfs4_layoutreturn_args *lr_args;
}; };
...@@ -648,7 +648,7 @@ struct nfs_pgio_args { ...@@ -648,7 +648,7 @@ struct nfs_pgio_args {
union { union {
unsigned int replen; /* used by read */ unsigned int replen; /* used by read */
struct { struct {
const u32 * bitmask; /* used by write */ u32 * bitmask; /* used by write */
enum nfs3_stable_how stable; /* used by write */ enum nfs3_stable_how stable; /* used by write */
}; };
}; };
...@@ -657,7 +657,7 @@ struct nfs_pgio_args { ...@@ -657,7 +657,7 @@ struct nfs_pgio_args {
struct nfs_pgio_res { struct nfs_pgio_res {
struct nfs4_sequence_res seq_res; struct nfs4_sequence_res seq_res;
struct nfs_fattr * fattr; struct nfs_fattr * fattr;
__u32 count; __u64 count;
__u32 op_status; __u32 op_status;
union { union {
struct { struct {
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
NetApp provides this source code under the GPL v2 License. NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php. https://opensource.org/licenses/gpl-license.php.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
......
...@@ -45,7 +45,8 @@ ...@@ -45,7 +45,8 @@
*/ */
struct cache_head { struct cache_head {
struct hlist_node cache_list; struct hlist_node cache_list;
time64_t expiry_time; /* After time time, don't use the data */ time64_t expiry_time; /* After time expiry_time, don't use
* the data */
time64_t last_refresh; /* If CACHE_PENDING, this is when upcall was time64_t last_refresh; /* If CACHE_PENDING, this is when upcall was
* sent, else this is when update was * sent, else this is when update was
* received, though it is alway set to * received, though it is alway set to
......
...@@ -143,7 +143,7 @@ typedef __be32 rpc_fraghdr; ...@@ -143,7 +143,7 @@ typedef __be32 rpc_fraghdr;
/* /*
* Well-known netids. See: * Well-known netids. See:
* *
* http://www.iana.org/assignments/rpc-netids/rpc-netids.xhtml * https://www.iana.org/assignments/rpc-netids/rpc-netids.xhtml
*/ */
#define RPCBIND_NETID_UDP "udp" #define RPCBIND_NETID_UDP "udp"
#define RPCBIND_NETID_TCP "tcp" #define RPCBIND_NETID_TCP "tcp"
......
...@@ -240,6 +240,7 @@ extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen); ...@@ -240,6 +240,7 @@ extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen);
extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
unsigned int base, unsigned int len); unsigned int base, unsigned int len);
extern unsigned int xdr_stream_pos(const struct xdr_stream *xdr); extern unsigned int xdr_stream_pos(const struct xdr_stream *xdr);
extern unsigned int xdr_page_pos(const struct xdr_stream *xdr);
extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf,
__be32 *p, struct rpc_rqst *rqst); __be32 *p, struct rpc_rqst *rqst);
extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
...@@ -249,6 +250,8 @@ extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes); ...@@ -249,6 +250,8 @@ extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len); extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len); extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len);
extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data); extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data);
extern uint64_t xdr_align_data(struct xdr_stream *, uint64_t, uint32_t);
extern uint64_t xdr_expand_hole(struct xdr_stream *, uint64_t, uint64_t);
/** /**
* xdr_stream_remaining - Return the number of bytes remaining in the stream * xdr_stream_remaining - Return the number of bytes remaining in the stream
......
...@@ -424,7 +424,6 @@ DEFINE_CONN_EVENT(connect); ...@@ -424,7 +424,6 @@ DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect); DEFINE_CONN_EVENT(disconnect);
DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc); DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
DEFINE_RXPRT_EVENT(xprtrdma_op_setport);
TRACE_EVENT(xprtrdma_op_connect, TRACE_EVENT(xprtrdma_op_connect,
TP_PROTO( TP_PROTO(
...@@ -1188,68 +1187,6 @@ TRACE_EVENT(xprtrdma_decode_seg, ...@@ -1188,68 +1187,6 @@ TRACE_EVENT(xprtrdma_decode_seg,
) )
); );
/**
** Allocation/release of rpcrdma_reqs and rpcrdma_reps
**/
TRACE_EVENT(xprtrdma_op_allocate,
TP_PROTO(
const struct rpc_task *task,
const struct rpcrdma_req *req
),
TP_ARGS(task, req),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(const void *, req)
__field(size_t, callsize)
__field(size_t, rcvsize)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->req = req;
__entry->callsize = task->tk_rqstp->rq_callsize;
__entry->rcvsize = task->tk_rqstp->rq_rcvsize;
),
TP_printk("task:%u@%u req=%p (%zu, %zu)",
__entry->task_id, __entry->client_id,
__entry->req, __entry->callsize, __entry->rcvsize
)
);
TRACE_EVENT(xprtrdma_op_free,
TP_PROTO(
const struct rpc_task *task,
const struct rpcrdma_req *req
),
TP_ARGS(task, req),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(const void *, req)
__field(const void *, rep)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->req = req;
__entry->rep = req->rl_reply;
),
TP_printk("task:%u@%u req=%p rep=%p",
__entry->task_id, __entry->client_id,
__entry->req, __entry->rep
)
);
/** /**
** Callback events ** Callback events
**/ **/
......
...@@ -259,8 +259,10 @@ DECLARE_EVENT_CLASS(rpc_task_status, ...@@ -259,8 +259,10 @@ DECLARE_EVENT_CLASS(rpc_task_status,
TP_ARGS(task)) TP_ARGS(task))
DEFINE_RPC_STATUS_EVENT(call); DEFINE_RPC_STATUS_EVENT(call);
DEFINE_RPC_STATUS_EVENT(bind);
DEFINE_RPC_STATUS_EVENT(connect); DEFINE_RPC_STATUS_EVENT(connect);
DEFINE_RPC_STATUS_EVENT(timeout);
DEFINE_RPC_STATUS_EVENT(retry_refresh);
DEFINE_RPC_STATUS_EVENT(refresh);
TRACE_EVENT(rpc_request, TRACE_EVENT(rpc_request,
TP_PROTO(const struct rpc_task *task), TP_PROTO(const struct rpc_task *task),
...@@ -385,7 +387,10 @@ DECLARE_EVENT_CLASS(rpc_task_running, ...@@ -385,7 +387,10 @@ DECLARE_EVENT_CLASS(rpc_task_running,
DEFINE_RPC_RUNNING_EVENT(begin); DEFINE_RPC_RUNNING_EVENT(begin);
DEFINE_RPC_RUNNING_EVENT(run_action); DEFINE_RPC_RUNNING_EVENT(run_action);
DEFINE_RPC_RUNNING_EVENT(sync_sleep);
DEFINE_RPC_RUNNING_EVENT(sync_wake);
DEFINE_RPC_RUNNING_EVENT(complete); DEFINE_RPC_RUNNING_EVENT(complete);
DEFINE_RPC_RUNNING_EVENT(timeout);
DEFINE_RPC_RUNNING_EVENT(signalled); DEFINE_RPC_RUNNING_EVENT(signalled);
DEFINE_RPC_RUNNING_EVENT(end); DEFINE_RPC_RUNNING_EVENT(end);
...@@ -517,6 +522,49 @@ DEFINE_RPC_REPLY_EVENT(stale_creds); ...@@ -517,6 +522,49 @@ DEFINE_RPC_REPLY_EVENT(stale_creds);
DEFINE_RPC_REPLY_EVENT(bad_creds); DEFINE_RPC_REPLY_EVENT(bad_creds);
DEFINE_RPC_REPLY_EVENT(auth_tooweak); DEFINE_RPC_REPLY_EVENT(auth_tooweak);
#define DEFINE_RPCB_ERROR_EVENT(name) \
DEFINE_EVENT(rpc_reply_event, rpcb_##name##_err, \
TP_PROTO( \
const struct rpc_task *task \
), \
TP_ARGS(task))
DEFINE_RPCB_ERROR_EVENT(prog_unavail);
DEFINE_RPCB_ERROR_EVENT(timeout);
DEFINE_RPCB_ERROR_EVENT(bind_version);
DEFINE_RPCB_ERROR_EVENT(unreachable);
DEFINE_RPCB_ERROR_EVENT(unrecognized);
TRACE_EVENT(rpc_buf_alloc,
TP_PROTO(
const struct rpc_task *task,
int status
),
TP_ARGS(task, status),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(size_t, callsize)
__field(size_t, recvsize)
__field(int, status)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->callsize = task->tk_rqstp->rq_callsize;
__entry->recvsize = task->tk_rqstp->rq_rcvsize;
__entry->status = status;
),
TP_printk("task:%u@%u callsize=%zu recvsize=%zu status=%d",
__entry->task_id, __entry->client_id,
__entry->callsize, __entry->recvsize, __entry->status
)
);
TRACE_EVENT(rpc_call_rpcerror, TRACE_EVENT(rpc_call_rpcerror,
TP_PROTO( TP_PROTO(
const struct rpc_task *task, const struct rpc_task *task,
...@@ -868,6 +916,34 @@ DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_reset_connection); ...@@ -868,6 +916,34 @@ DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_reset_connection);
DEFINE_RPC_SOCKET_EVENT(rpc_socket_close); DEFINE_RPC_SOCKET_EVENT(rpc_socket_close);
DEFINE_RPC_SOCKET_EVENT(rpc_socket_shutdown); DEFINE_RPC_SOCKET_EVENT(rpc_socket_shutdown);
TRACE_EVENT(rpc_socket_nospace,
TP_PROTO(
const struct rpc_rqst *rqst,
const struct sock_xprt *transport
),
TP_ARGS(rqst, transport),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(unsigned int, total)
__field(unsigned int, remaining)
),
TP_fast_assign(
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid;
__entry->total = rqst->rq_slen;
__entry->remaining = rqst->rq_slen - transport->xmit.offset;
),
TP_printk("task:%u@%u total=%u remaining=%u",
__entry->task_id, __entry->client_id,
__entry->total, __entry->remaining
)
);
TRACE_DEFINE_ENUM(XPRT_LOCKED); TRACE_DEFINE_ENUM(XPRT_LOCKED);
TRACE_DEFINE_ENUM(XPRT_CONNECTED); TRACE_DEFINE_ENUM(XPRT_CONNECTED);
TRACE_DEFINE_ENUM(XPRT_CONNECTING); TRACE_DEFINE_ENUM(XPRT_CONNECTING);
...@@ -925,6 +1001,7 @@ DECLARE_EVENT_CLASS(rpc_xprt_lifetime_class, ...@@ -925,6 +1001,7 @@ DECLARE_EVENT_CLASS(rpc_xprt_lifetime_class,
TP_ARGS(xprt)) TP_ARGS(xprt))
DEFINE_RPC_XPRT_LIFETIME_EVENT(create); DEFINE_RPC_XPRT_LIFETIME_EVENT(create);
DEFINE_RPC_XPRT_LIFETIME_EVENT(connect);
DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_auto); DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_auto);
DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_done); DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_done);
DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_force); DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_force);
...@@ -969,7 +1046,6 @@ DECLARE_EVENT_CLASS(rpc_xprt_event, ...@@ -969,7 +1046,6 @@ DECLARE_EVENT_CLASS(rpc_xprt_event,
DEFINE_RPC_XPRT_EVENT(timer); DEFINE_RPC_XPRT_EVENT(timer);
DEFINE_RPC_XPRT_EVENT(lookup_rqst); DEFINE_RPC_XPRT_EVENT(lookup_rqst);
DEFINE_RPC_XPRT_EVENT(complete_rqst);
TRACE_EVENT(xprt_transmit, TRACE_EVENT(xprt_transmit,
TP_PROTO( TP_PROTO(
...@@ -1002,37 +1078,6 @@ TRACE_EVENT(xprt_transmit, ...@@ -1002,37 +1078,6 @@ TRACE_EVENT(xprt_transmit,
__entry->seqno, __entry->status) __entry->seqno, __entry->status)
); );
TRACE_EVENT(xprt_enq_xmit,
TP_PROTO(
const struct rpc_task *task,
int stage
),
TP_ARGS(task, stage),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
__field(u32, seqno)
__field(int, stage)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client ?
task->tk_client->cl_clid : -1;
__entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
__entry->seqno = task->tk_rqstp->rq_seqno;
__entry->stage = stage;
),
TP_printk(
"task:%u@%u xid=0x%08x seqno=%u stage=%d",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->seqno, __entry->stage)
);
TRACE_EVENT(xprt_ping, TRACE_EVENT(xprt_ping,
TP_PROTO(const struct rpc_xprt *xprt, int status), TP_PROTO(const struct rpc_xprt *xprt, int status),
...@@ -1095,6 +1140,7 @@ DECLARE_EVENT_CLASS(xprt_writelock_event, ...@@ -1095,6 +1140,7 @@ DECLARE_EVENT_CLASS(xprt_writelock_event,
DEFINE_WRITELOCK_EVENT(reserve_xprt); DEFINE_WRITELOCK_EVENT(reserve_xprt);
DEFINE_WRITELOCK_EVENT(release_xprt); DEFINE_WRITELOCK_EVENT(release_xprt);
DEFINE_WRITELOCK_EVENT(transmit_queued);
DECLARE_EVENT_CLASS(xprt_cong_event, DECLARE_EVENT_CLASS(xprt_cong_event,
TP_PROTO( TP_PROTO(
...@@ -1147,6 +1193,30 @@ DEFINE_CONG_EVENT(release_cong); ...@@ -1147,6 +1193,30 @@ DEFINE_CONG_EVENT(release_cong);
DEFINE_CONG_EVENT(get_cong); DEFINE_CONG_EVENT(get_cong);
DEFINE_CONG_EVENT(put_cong); DEFINE_CONG_EVENT(put_cong);
TRACE_EVENT(xprt_reserve,
TP_PROTO(
const struct rpc_rqst *rqst
),
TP_ARGS(rqst),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
),
TP_fast_assign(
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(rqst->rq_xid);
),
TP_printk("task:%u@%u xid=0x%08x",
__entry->task_id, __entry->client_id, __entry->xid
)
);
TRACE_EVENT(xs_stream_read_data, TRACE_EVENT(xs_stream_read_data,
TP_PROTO(struct rpc_xprt *xprt, ssize_t err, size_t total), TP_PROTO(struct rpc_xprt *xprt, ssize_t err, size_t total),
...@@ -1202,6 +1272,156 @@ TRACE_EVENT(xs_stream_read_request, ...@@ -1202,6 +1272,156 @@ TRACE_EVENT(xs_stream_read_request,
__entry->copied, __entry->reclen, __entry->offset) __entry->copied, __entry->reclen, __entry->offset)
); );
TRACE_EVENT(rpcb_getport,
TP_PROTO(
const struct rpc_clnt *clnt,
const struct rpc_task *task,
unsigned int bind_version
),
TP_ARGS(clnt, task, bind_version),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(unsigned int, program)
__field(unsigned int, version)
__field(int, protocol)
__field(unsigned int, bind_version)
__string(servername, task->tk_xprt->servername)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = clnt->cl_clid;
__entry->program = clnt->cl_prog;
__entry->version = clnt->cl_vers;
__entry->protocol = task->tk_xprt->prot;
__entry->bind_version = bind_version;
__assign_str(servername, task->tk_xprt->servername);
),
TP_printk("task:%u@%u server=%s program=%u version=%u protocol=%d bind_version=%u",
__entry->task_id, __entry->client_id, __get_str(servername),
__entry->program, __entry->version, __entry->protocol,
__entry->bind_version
)
);
TRACE_EVENT(rpcb_setport,
TP_PROTO(
const struct rpc_task *task,
int status,
unsigned short port
),
TP_ARGS(task, status, port),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(int, status)
__field(unsigned short, port)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->status = status;
__entry->port = port;
),
TP_printk("task:%u@%u status=%d port=%u",
__entry->task_id, __entry->client_id,
__entry->status, __entry->port
)
);
TRACE_EVENT(pmap_register,
TP_PROTO(
u32 program,
u32 version,
int protocol,
unsigned short port
),
TP_ARGS(program, version, protocol, port),
TP_STRUCT__entry(
__field(unsigned int, program)
__field(unsigned int, version)
__field(int, protocol)
__field(unsigned int, port)
),
TP_fast_assign(
__entry->program = program;
__entry->version = version;
__entry->protocol = protocol;
__entry->port = port;
),
TP_printk("program=%u version=%u protocol=%d port=%u",
__entry->program, __entry->version,
__entry->protocol, __entry->port
)
);
TRACE_EVENT(rpcb_register,
TP_PROTO(
u32 program,
u32 version,
const char *addr,
const char *netid
),
TP_ARGS(program, version, addr, netid),
TP_STRUCT__entry(
__field(unsigned int, program)
__field(unsigned int, version)
__string(addr, addr)
__string(netid, netid)
),
TP_fast_assign(
__entry->program = program;
__entry->version = version;
__assign_str(addr, addr);
__assign_str(netid, netid);
),
TP_printk("program=%u version=%u addr=%s netid=%s",
__entry->program, __entry->version,
__get_str(addr), __get_str(netid)
)
);
TRACE_EVENT(rpcb_unregister,
TP_PROTO(
u32 program,
u32 version,
const char *netid
),
TP_ARGS(program, version, netid),
TP_STRUCT__entry(
__field(unsigned int, program)
__field(unsigned int, version)
__string(netid, netid)
),
TP_fast_assign(
__entry->program = program;
__entry->version = version;
__assign_str(netid, netid);
),
TP_printk("program=%u version=%u netid=%s",
__entry->program, __entry->version, __get_str(netid)
)
);
DECLARE_EVENT_CLASS(svc_xdr_buf_class, DECLARE_EVENT_CLASS(svc_xdr_buf_class,
TP_PROTO( TP_PROTO(
......
...@@ -139,6 +139,8 @@ ...@@ -139,6 +139,8 @@
#define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A 0x40000000 #define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A 0x40000000
#define EXCHGID4_FLAG_CONFIRMED_R 0x80000000 #define EXCHGID4_FLAG_CONFIRMED_R 0x80000000
#define EXCHGID4_FLAG_SUPP_FENCE_OPS 0x00000004
/* /*
* Since the validity of these bits depends on whether * Since the validity of these bits depends on whether
* they're set in the argument or response, have separate * they're set in the argument or response, have separate
...@@ -146,6 +148,7 @@ ...@@ -146,6 +148,7 @@
*/ */
#define EXCHGID4_FLAG_MASK_A 0x40070103 #define EXCHGID4_FLAG_MASK_A 0x40070103
#define EXCHGID4_FLAG_MASK_R 0x80070103 #define EXCHGID4_FLAG_MASK_R 0x80070103
#define EXCHGID4_2_FLAG_MASK_R 0x80070107
#define SEQ4_STATUS_CB_PATH_DOWN 0x00000001 #define SEQ4_STATUS_CB_PATH_DOWN 0x00000001
#define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING 0x00000002 #define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING 0x00000002
......
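As a quick worked check of the new mask above: EXCHGID4_2_FLAG_MASK_R is EXCHGID4_FLAG_MASK_R with the SUPP_FENCE_OPS bit folded in (0x80070103 | 0x00000004 == 0x80070107). The compile-time sketch below restates the values from this hunk; it is illustrative only and not part of the kernel headers.

#include <assert.h>

#define EXCHGID4_FLAG_SUPP_FENCE_OPS    0x00000004
#define EXCHGID4_FLAG_MASK_R            0x80070103
#define EXCHGID4_2_FLAG_MASK_R          0x80070107

/* The v4.2 reply mask must be the v4.1 reply mask plus the fence-ops bit. */
static_assert((EXCHGID4_FLAG_MASK_R | EXCHGID4_FLAG_SUPP_FENCE_OPS) ==
              EXCHGID4_2_FLAG_MASK_R,
              "EXCHGID4_2_FLAG_MASK_R must include EXCHGID4_FLAG_SUPP_FENCE_OPS");

int main(void) { return 0; }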
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
NetApp provides this source code under the GPL v2 License. NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php. https://opensource.org/licenses/gpl-license.php.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
......
...@@ -47,10 +47,6 @@ ...@@ -47,10 +47,6 @@
# define RPCDBG_FACILITY RPCDBG_CALL # define RPCDBG_FACILITY RPCDBG_CALL
#endif #endif
#define dprint_status(t) \
dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \
__func__, t->tk_status)
/* /*
* All RPC clients are linked into this list * All RPC clients are linked into this list
*/ */
...@@ -1639,10 +1635,6 @@ call_start(struct rpc_task *task) ...@@ -1639,10 +1635,6 @@ call_start(struct rpc_task *task)
int idx = task->tk_msg.rpc_proc->p_statidx; int idx = task->tk_msg.rpc_proc->p_statidx;
trace_rpc_request(task); trace_rpc_request(task);
dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
clnt->cl_program->name, clnt->cl_vers,
rpc_proc_name(task),
(RPC_IS_ASYNC(task) ? "async" : "sync"));
/* Increment call count (version might not be valid for ping) */ /* Increment call count (version might not be valid for ping) */
if (clnt->cl_program->version[clnt->cl_vers]) if (clnt->cl_program->version[clnt->cl_vers])
...@@ -1658,8 +1650,6 @@ call_start(struct rpc_task *task) ...@@ -1658,8 +1650,6 @@ call_start(struct rpc_task *task)
static void static void
call_reserve(struct rpc_task *task) call_reserve(struct rpc_task *task)
{ {
dprint_status(task);
task->tk_status = 0; task->tk_status = 0;
task->tk_action = call_reserveresult; task->tk_action = call_reserveresult;
xprt_reserve(task); xprt_reserve(task);
...@@ -1675,8 +1665,6 @@ call_reserveresult(struct rpc_task *task) ...@@ -1675,8 +1665,6 @@ call_reserveresult(struct rpc_task *task)
{ {
int status = task->tk_status; int status = task->tk_status;
dprint_status(task);
/* /*
* After a call to xprt_reserve(), we must have either * After a call to xprt_reserve(), we must have either
* a request slot or else an error status. * a request slot or else an error status.
...@@ -1717,8 +1705,6 @@ call_reserveresult(struct rpc_task *task) ...@@ -1717,8 +1705,6 @@ call_reserveresult(struct rpc_task *task)
static void static void
call_retry_reserve(struct rpc_task *task) call_retry_reserve(struct rpc_task *task)
{ {
dprint_status(task);
task->tk_status = 0; task->tk_status = 0;
task->tk_action = call_reserveresult; task->tk_action = call_reserveresult;
xprt_retry_reserve(task); xprt_retry_reserve(task);
...@@ -1730,8 +1716,6 @@ call_retry_reserve(struct rpc_task *task) ...@@ -1730,8 +1716,6 @@ call_retry_reserve(struct rpc_task *task)
static void static void
call_refresh(struct rpc_task *task) call_refresh(struct rpc_task *task)
{ {
dprint_status(task);
task->tk_action = call_refreshresult; task->tk_action = call_refreshresult;
task->tk_status = 0; task->tk_status = 0;
task->tk_client->cl_stats->rpcauthrefresh++; task->tk_client->cl_stats->rpcauthrefresh++;
...@@ -1746,8 +1730,6 @@ call_refreshresult(struct rpc_task *task) ...@@ -1746,8 +1730,6 @@ call_refreshresult(struct rpc_task *task)
{ {
int status = task->tk_status; int status = task->tk_status;
dprint_status(task);
task->tk_status = 0; task->tk_status = 0;
task->tk_action = call_refresh; task->tk_action = call_refresh;
switch (status) { switch (status) {
...@@ -1770,12 +1752,10 @@ call_refreshresult(struct rpc_task *task) ...@@ -1770,12 +1752,10 @@ call_refreshresult(struct rpc_task *task)
if (!task->tk_cred_retry) if (!task->tk_cred_retry)
break; break;
task->tk_cred_retry--; task->tk_cred_retry--;
dprintk("RPC: %5u %s: retry refresh creds\n", trace_rpc_retry_refresh_status(task);
task->tk_pid, __func__);
return; return;
} }
dprintk("RPC: %5u %s: refresh creds failed with error %d\n", trace_rpc_refresh_status(task);
task->tk_pid, __func__, status);
rpc_call_rpcerror(task, status); rpc_call_rpcerror(task, status);
} }
...@@ -1792,8 +1772,6 @@ call_allocate(struct rpc_task *task) ...@@ -1792,8 +1772,6 @@ call_allocate(struct rpc_task *task)
const struct rpc_procinfo *proc = task->tk_msg.rpc_proc; const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
int status; int status;
dprint_status(task);
task->tk_status = 0; task->tk_status = 0;
task->tk_action = call_encode; task->tk_action = call_encode;
...@@ -1823,6 +1801,7 @@ call_allocate(struct rpc_task *task) ...@@ -1823,6 +1801,7 @@ call_allocate(struct rpc_task *task)
req->rq_rcvsize <<= 2; req->rq_rcvsize <<= 2;
status = xprt->ops->buf_alloc(task); status = xprt->ops->buf_alloc(task);
trace_rpc_buf_alloc(task, status);
xprt_inject_disconnect(xprt); xprt_inject_disconnect(xprt);
if (status == 0) if (status == 0)
return; return;
...@@ -1831,8 +1810,6 @@ call_allocate(struct rpc_task *task) ...@@ -1831,8 +1810,6 @@ call_allocate(struct rpc_task *task)
return; return;
} }
dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) { if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
task->tk_action = call_allocate; task->tk_action = call_allocate;
rpc_delay(task, HZ>>4); rpc_delay(task, HZ>>4);
...@@ -1883,7 +1860,7 @@ call_encode(struct rpc_task *task) ...@@ -1883,7 +1860,7 @@ call_encode(struct rpc_task *task)
{ {
if (!rpc_task_need_encode(task)) if (!rpc_task_need_encode(task))
goto out; goto out;
dprint_status(task);
/* Dequeue task from the receive queue while we're encoding */ /* Dequeue task from the receive queue while we're encoding */
xprt_request_dequeue_xprt(task); xprt_request_dequeue_xprt(task);
/* Encode here so that rpcsec_gss can use correct sequence number. */ /* Encode here so that rpcsec_gss can use correct sequence number. */
...@@ -1902,8 +1879,7 @@ call_encode(struct rpc_task *task) ...@@ -1902,8 +1879,7 @@ call_encode(struct rpc_task *task)
} else { } else {
task->tk_action = call_refresh; task->tk_action = call_refresh;
task->tk_cred_retry--; task->tk_cred_retry--;
dprintk("RPC: %5u %s: retry refresh creds\n", trace_rpc_retry_refresh_status(task);
task->tk_pid, __func__);
} }
break; break;
default: default:
...@@ -1960,8 +1936,6 @@ call_bind(struct rpc_task *task) ...@@ -1960,8 +1936,6 @@ call_bind(struct rpc_task *task)
return; return;
} }
dprint_status(task);
task->tk_action = call_bind_status; task->tk_action = call_bind_status;
if (!xprt_prepare_transmit(task)) if (!xprt_prepare_transmit(task))
return; return;
...@@ -1983,8 +1957,6 @@ call_bind_status(struct rpc_task *task) ...@@ -1983,8 +1957,6 @@ call_bind_status(struct rpc_task *task)
return; return;
} }
dprint_status(task);
trace_rpc_bind_status(task);
if (task->tk_status >= 0) if (task->tk_status >= 0)
goto out_next; goto out_next;
if (xprt_bound(xprt)) { if (xprt_bound(xprt)) {
...@@ -1994,12 +1966,10 @@ call_bind_status(struct rpc_task *task) ...@@ -1994,12 +1966,10 @@ call_bind_status(struct rpc_task *task)
switch (task->tk_status) { switch (task->tk_status) {
case -ENOMEM: case -ENOMEM:
dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
rpc_delay(task, HZ >> 2); rpc_delay(task, HZ >> 2);
goto retry_timeout; goto retry_timeout;
case -EACCES: case -EACCES:
dprintk("RPC: %5u remote rpcbind: RPC program/version " trace_rpcb_prog_unavail_err(task);
"unavailable\n", task->tk_pid);
/* fail immediately if this is an RPC ping */ /* fail immediately if this is an RPC ping */
if (task->tk_msg.rpc_proc->p_proc == 0) { if (task->tk_msg.rpc_proc->p_proc == 0) {
status = -EOPNOTSUPP; status = -EOPNOTSUPP;
...@@ -2016,17 +1986,14 @@ call_bind_status(struct rpc_task *task) ...@@ -2016,17 +1986,14 @@ call_bind_status(struct rpc_task *task)
case -EAGAIN: case -EAGAIN:
goto retry_timeout; goto retry_timeout;
case -ETIMEDOUT: case -ETIMEDOUT:
dprintk("RPC: %5u rpcbind request timed out\n", trace_rpcb_timeout_err(task);
task->tk_pid);
goto retry_timeout; goto retry_timeout;
case -EPFNOSUPPORT: case -EPFNOSUPPORT:
/* server doesn't support any rpcbind version we know of */ /* server doesn't support any rpcbind version we know of */
dprintk("RPC: %5u unrecognized remote rpcbind service\n", trace_rpcb_bind_version_err(task);
task->tk_pid);
break; break;
case -EPROTONOSUPPORT: case -EPROTONOSUPPORT:
dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n", trace_rpcb_bind_version_err(task);
task->tk_pid);
goto retry_timeout; goto retry_timeout;
case -ECONNREFUSED: /* connection problems */ case -ECONNREFUSED: /* connection problems */
case -ECONNRESET: case -ECONNRESET:
...@@ -2037,8 +2004,7 @@ call_bind_status(struct rpc_task *task) ...@@ -2037,8 +2004,7 @@ call_bind_status(struct rpc_task *task)
case -EHOSTUNREACH: case -EHOSTUNREACH:
case -ENETUNREACH: case -ENETUNREACH:
case -EPIPE: case -EPIPE:
dprintk("RPC: %5u remote rpcbind unreachable: %d\n", trace_rpcb_unreachable_err(task);
task->tk_pid, task->tk_status);
if (!RPC_IS_SOFTCONN(task)) { if (!RPC_IS_SOFTCONN(task)) {
rpc_delay(task, 5*HZ); rpc_delay(task, 5*HZ);
goto retry_timeout; goto retry_timeout;
...@@ -2046,8 +2012,7 @@ call_bind_status(struct rpc_task *task) ...@@ -2046,8 +2012,7 @@ call_bind_status(struct rpc_task *task)
status = task->tk_status; status = task->tk_status;
break; break;
default: default:
dprintk("RPC: %5u unrecognized rpcbind error (%d)\n", trace_rpcb_unrecognized_err(task);
task->tk_pid, -task->tk_status);
} }
rpc_call_rpcerror(task, status); rpc_call_rpcerror(task, status);
...@@ -2079,10 +2044,6 @@ call_connect(struct rpc_task *task) ...@@ -2079,10 +2044,6 @@ call_connect(struct rpc_task *task)
return; return;
} }
dprintk("RPC: %5u call_connect xprt %p %s connected\n",
task->tk_pid, xprt,
(xprt_connected(xprt) ? "is" : "is not"));
task->tk_action = call_connect_status; task->tk_action = call_connect_status;
if (task->tk_status < 0) if (task->tk_status < 0)
return; return;
...@@ -2110,7 +2071,6 @@ call_connect_status(struct rpc_task *task) ...@@ -2110,7 +2071,6 @@ call_connect_status(struct rpc_task *task)
return; return;
} }
dprint_status(task);
trace_rpc_connect_status(task); trace_rpc_connect_status(task);
if (task->tk_status == 0) { if (task->tk_status == 0) {
...@@ -2178,8 +2138,6 @@ call_transmit(struct rpc_task *task) ...@@ -2178,8 +2138,6 @@ call_transmit(struct rpc_task *task)
return; return;
} }
dprint_status(task);
task->tk_action = call_transmit_status; task->tk_action = call_transmit_status;
if (!xprt_prepare_transmit(task)) if (!xprt_prepare_transmit(task))
return; return;
...@@ -2214,7 +2172,6 @@ call_transmit_status(struct rpc_task *task) ...@@ -2214,7 +2172,6 @@ call_transmit_status(struct rpc_task *task)
switch (task->tk_status) { switch (task->tk_status) {
default: default:
dprint_status(task);
break; break;
case -EBADMSG: case -EBADMSG:
task->tk_status = 0; task->tk_status = 0;
...@@ -2296,8 +2253,6 @@ call_bc_transmit_status(struct rpc_task *task) ...@@ -2296,8 +2253,6 @@ call_bc_transmit_status(struct rpc_task *task)
if (rpc_task_transmitted(task)) if (rpc_task_transmitted(task))
task->tk_status = 0; task->tk_status = 0;
dprint_status(task);
switch (task->tk_status) { switch (task->tk_status) {
case 0: case 0:
/* Success */ /* Success */
...@@ -2357,8 +2312,6 @@ call_status(struct rpc_task *task) ...@@ -2357,8 +2312,6 @@ call_status(struct rpc_task *task)
if (!task->tk_msg.rpc_proc->p_proc) if (!task->tk_msg.rpc_proc->p_proc)
trace_xprt_ping(task->tk_xprt, task->tk_status); trace_xprt_ping(task->tk_xprt, task->tk_status);
dprint_status(task);
status = task->tk_status; status = task->tk_status;
if (status >= 0) { if (status >= 0) {
task->tk_action = call_decode; task->tk_action = call_decode;
...@@ -2405,7 +2358,8 @@ call_status(struct rpc_task *task) ...@@ -2405,7 +2358,8 @@ call_status(struct rpc_task *task)
goto out_exit; goto out_exit;
} }
task->tk_action = call_encode; task->tk_action = call_encode;
rpc_check_timeout(task); if (status != -ECONNRESET && status != -ECONNABORTED)
rpc_check_timeout(task);
return; return;
out_exit: out_exit:
rpc_call_rpcerror(task, status); rpc_call_rpcerror(task, status);
...@@ -2433,7 +2387,7 @@ rpc_check_timeout(struct rpc_task *task) ...@@ -2433,7 +2387,7 @@ rpc_check_timeout(struct rpc_task *task)
if (xprt_adjust_timeout(task->tk_rqstp) == 0) if (xprt_adjust_timeout(task->tk_rqstp) == 0)
return; return;
dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid); trace_rpc_timeout_status(task);
task->tk_timeouts++; task->tk_timeouts++;
if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) { if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) {
...@@ -2492,8 +2446,6 @@ call_decode(struct rpc_task *task) ...@@ -2492,8 +2446,6 @@ call_decode(struct rpc_task *task)
struct xdr_stream xdr; struct xdr_stream xdr;
int err; int err;
dprint_status(task);
if (!task->tk_msg.rpc_proc->p_decode) { if (!task->tk_msg.rpc_proc->p_decode) {
task->tk_action = rpc_exit_task; task->tk_action = rpc_exit_task;
return; return;
...@@ -2537,8 +2489,6 @@ call_decode(struct rpc_task *task) ...@@ -2537,8 +2489,6 @@ call_decode(struct rpc_task *task)
case 0: case 0:
task->tk_action = rpc_exit_task; task->tk_action = rpc_exit_task;
task->tk_status = rpcauth_unwrap_resp(task, &xdr); task->tk_status = rpcauth_unwrap_resp(task, &xdr);
dprintk("RPC: %5u %s result %d\n",
task->tk_pid, __func__, task->tk_status);
return; return;
case -EAGAIN: case -EAGAIN:
task->tk_status = 0; task->tk_status = 0;
......
@@ -27,10 +27,6 @@
 #include "sunrpc.h"
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-#define RPCDBG_FACILITY		RPCDBG_SCHED
-#endif
 #define CREATE_TRACE_POINTS
 #include <trace/events/sunrpc.h>
@@ -85,7 +81,6 @@ __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
 {
 	if (list_empty(&task->u.tk_wait.timer_list))
 		return;
-	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
 	task->tk_timeout = 0;
 	list_del(&task->u.tk_wait.timer_list);
 	if (list_empty(&queue->timer_list.list))
@@ -111,9 +106,6 @@ static void
 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
 		unsigned long timeout)
 {
-	dprintk("RPC: %5u setting alarm for %u ms\n",
-		task->tk_pid, jiffies_to_msecs(timeout - jiffies));
 	task->tk_timeout = timeout;
 	if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
 		rpc_set_queue_timer(queue, timeout);
@@ -216,9 +208,6 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
 	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
 	smp_wmb();
 	rpc_set_queued(task);
-	dprintk("RPC: %5u added to queue %p \"%s\"\n",
-		task->tk_pid, queue, rpc_qname(queue));
 }
 /*
@@ -241,8 +230,6 @@ static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_tas
 	else
 		list_del(&task->u.tk_wait.list);
 	queue->qlen--;
-	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
-		task->tk_pid, queue, rpc_qname(queue));
 }
 static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
@@ -382,13 +369,9 @@ static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
 		struct rpc_task *task,
 		unsigned char queue_priority)
 {
-	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
-		task->tk_pid, rpc_qname(q), jiffies);
 	trace_rpc_task_sleep(task, q);
 	__rpc_add_wait_queue(q, task, queue_priority);
 }
 static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
@@ -510,9 +493,6 @@ static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
 		struct rpc_wait_queue *queue,
 		struct rpc_task *task)
 {
-	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
-		task->tk_pid, jiffies);
 	/* Has the task been executed yet? If not, we cannot wake it up! */
 	if (!RPC_IS_ACTIVATED(task)) {
 		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
@@ -524,8 +504,6 @@ static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
 	__rpc_remove_wait_queue(queue, task);
 	rpc_make_runnable(wq, task);
-	dprintk("RPC: __rpc_wake_up_task done\n");
 }
 /*
@@ -663,8 +641,6 @@ struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
 {
 	struct rpc_task *task = NULL;
-	dprintk("RPC: wake_up_first(%p \"%s\")\n",
-		queue, rpc_qname(queue));
 	spin_lock(&queue->lock);
 	task = __rpc_find_next_queued(queue);
 	if (task != NULL)
@@ -770,7 +746,7 @@ static void __rpc_queue_timer_fn(struct work_struct *work)
 	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
 		timeo = task->tk_timeout;
 		if (time_after_eq(now, timeo)) {
-			dprintk("RPC: %5u timeout\n", task->tk_pid);
+			trace_rpc_task_timeout(task, task->tk_action);
 			task->tk_status = -ETIMEDOUT;
 			rpc_wake_up_task_queue_locked(queue, task);
 			continue;
@@ -885,9 +861,6 @@ static void __rpc_execute(struct rpc_task *task)
 	int task_is_async = RPC_IS_ASYNC(task);
 	int status = 0;
-	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
-		task->tk_pid, task->tk_flags);
 	WARN_ON_ONCE(RPC_IS_QUEUED(task));
 	if (RPC_IS_QUEUED(task))
 		return;
@@ -947,7 +920,7 @@ static void __rpc_execute(struct rpc_task *task)
 			return;
 		/* sync task: sleep here */
-		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
+		trace_rpc_task_sync_sleep(task, task->tk_action);
 		status = out_of_line_wait_on_bit(&task->tk_runstate,
 				RPC_TASK_QUEUED, rpc_wait_bit_killable,
 				TASK_KILLABLE);
@@ -963,11 +936,9 @@ static void __rpc_execute(struct rpc_task *task)
 			task->tk_rpc_status = -ERESTARTSYS;
 			rpc_exit(task, -ERESTARTSYS);
 		}
-		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
+		trace_rpc_task_sync_wake(task, task->tk_action);
 	}
-	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
-		task->tk_status);
 	/* Release all resources associated with the task */
 	rpc_release_task(task);
 }
@@ -1036,8 +1007,6 @@ int rpc_malloc(struct rpc_task *task)
 		return -ENOMEM;
 	buf->len = size;
-	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
-		task->tk_pid, size, buf);
 	rqst->rq_buffer = buf->data;
 	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
 	return 0;
@@ -1058,9 +1027,6 @@ void rpc_free(struct rpc_task *task)
 	buf = container_of(buffer, struct rpc_buffer, data);
 	size = buf->len;
-	dprintk("RPC: freeing buffer of size %zu at %p\n",
-		size, buf);
 	if (size <= RPC_BUFFER_MAXSIZE)
 		mempool_free(buf, rpc_buffer_mempool);
 	else
@@ -1095,9 +1061,6 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
 	task->tk_action = rpc_prepare_task;
 	rpc_init_task_statistics(task);
-	dprintk("RPC: new task initialized, procpid %u\n",
-		task_pid_nr(current));
 }
 static struct rpc_task *
@@ -1121,7 +1084,6 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
 	rpc_init_task(task, setup_data);
 	task->tk_flags |= flags;
-	dprintk("RPC: allocated task %p\n", task);
 	return task;
 }
@@ -1151,10 +1113,8 @@ static void rpc_free_task(struct rpc_task *task)
 	put_rpccred(task->tk_op_cred);
 	rpc_release_calldata(task->tk_ops, task->tk_calldata);
-	if (tk_flags & RPC_TASK_DYNAMIC) {
-		dprintk("RPC: %5u freeing task\n", task->tk_pid);
+	if (tk_flags & RPC_TASK_DYNAMIC)
 		mempool_free(task, rpc_task_mempool);
-	}
 }
 static void rpc_async_release(struct work_struct *work)
@@ -1208,8 +1168,6 @@ EXPORT_SYMBOL_GPL(rpc_put_task_async);
 static void rpc_release_task(struct rpc_task *task)
 {
-	dprintk("RPC: %5u release task\n", task->tk_pid);
 	WARN_ON_ONCE(RPC_IS_QUEUED(task));
 	rpc_release_resources_task(task);
@@ -1250,7 +1208,6 @@ static int rpciod_start(void)
 	/*
	 * Create the rpciod thread and wait for it to start.
	 */
-	dprintk("RPC: creating workqueue rpciod\n");
 	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
 	if (!wq)
 		goto out_failed;
@@ -1275,7 +1232,6 @@ static void rpciod_stop(void)
 	if (rpciod_workqueue == NULL)
 		return;
-	dprintk("RPC: destroying workqueue rpciod\n");
 	wq = rpciod_workqueue;
 	rpciod_workqueue = NULL;
@@ -4,7 +4,7 @@
 NetApp provides this source code under the GPL v2 License.
 The GPL v2 license is available at
-http://opensource.org/licenses/gpl-license.php.
+https://opensource.org/licenses/gpl-license.php.
 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@@ -834,8 +834,7 @@ void xprt_connect(struct rpc_task *task)
 {
 	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
-	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
-		xprt, (xprt_connected(xprt) ? "is" : "is not"));
+	trace_xprt_connect(xprt);
 	if (!xprt_bound(xprt)) {
 		task->tk_status = -EAGAIN;
@@ -1131,8 +1130,6 @@ void xprt_complete_rqst(struct rpc_task *task, int copied)
 	struct rpc_rqst *req = task->tk_rqstp;
 	struct rpc_xprt *xprt = req->rq_xprt;
-	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);
 	xprt->stat.recvs++;
 	req->rq_private_buf.len = copied;
@@ -1269,7 +1266,6 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
 			/* Note: req is added _before_ pos */
 			list_add_tail(&req->rq_xmit, &pos->rq_xmit);
 			INIT_LIST_HEAD(&req->rq_xmit2);
-			trace_xprt_enq_xmit(task, 1);
 			goto out;
 		}
 	} else if (RPC_IS_SWAPPER(task)) {
@@ -1281,7 +1277,6 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
 			/* Note: req is added _before_ pos */
 			list_add_tail(&req->rq_xmit, &pos->rq_xmit);
 			INIT_LIST_HEAD(&req->rq_xmit2);
-			trace_xprt_enq_xmit(task, 2);
 			goto out;
 		}
 	} else if (!req->rq_seqno) {
@@ -1290,13 +1285,11 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
 				continue;
 			list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
 			INIT_LIST_HEAD(&req->rq_xmit);
-			trace_xprt_enq_xmit(task, 3);
 			goto out;
 		}
 	}
 	list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
 	INIT_LIST_HEAD(&req->rq_xmit2);
-	trace_xprt_enq_xmit(task, 4);
 out:
 	set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
 	spin_unlock(&xprt->queue_lock);
@@ -1414,9 +1407,9 @@ bool xprt_prepare_transmit(struct rpc_task *task)
 	struct rpc_rqst *req = task->tk_rqstp;
 	struct rpc_xprt *xprt = req->rq_xprt;
-	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);
 	if (!xprt_lock_write(xprt, task)) {
+		trace_xprt_transmit_queued(xprt, task);
 		/* Race breaker: someone may have transmitted us */
 		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
 			rpc_wake_up_queued_task_set_status(&xprt->sending,
@@ -1520,10 +1513,13 @@ xprt_transmit(struct rpc_task *task)
 {
 	struct rpc_rqst *next, *req = task->tk_rqstp;
 	struct rpc_xprt *xprt = req->rq_xprt;
-	int status;
+	int counter, status;
 	spin_lock(&xprt->queue_lock);
+	counter = 0;
 	while (!list_empty(&xprt->xmit_queue)) {
+		if (++counter == 20)
+			break;
 		next = list_first_entry(&xprt->xmit_queue,
 				struct rpc_rqst, rq_xmit);
 		xprt_pin_rqst(next);
@@ -1531,7 +1527,6 @@ xprt_transmit(struct rpc_task *task)
 		status = xprt_request_transmit(next, task);
 		if (status == -EBADMSG && next != req)
 			status = 0;
-		cond_resched();
 		spin_lock(&xprt->queue_lock);
 		xprt_unpin_rqst(next);
 		if (status == 0) {
@@ -1747,8 +1742,8 @@ xprt_request_init(struct rpc_task *task)
 	req->rq_rcv_buf.bvec = NULL;
 	req->rq_release_snd_buf = NULL;
 	xprt_init_majortimeo(task, req);
-	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
-		req, ntohl(req->rq_xid));
+	trace_xprt_reserve(req);
 }
 static void
@@ -1838,7 +1833,6 @@ void xprt_release(struct rpc_task *task)
 	if (req->rq_release_snd_buf)
 		req->rq_release_snd_buf(req);
-	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
 	if (likely(!bc_prealloc(req)))
 		xprt->ops->free_slot(xprt, req);
 	else
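Aside from the dprintk()-to-tracepoint conversions, the xprt_transmit() hunks above also drop the cond_resched() between transmissions and instead stop after draining a bounded number of queued requests per call (20 in the hunk), presumably to limit how long one caller spends sending other tasks' requests in a single pass. Below is a detached sketch of that bounded-drain shape, using hypothetical names and an assumed limit constant rather than the real SUNRPC structures:

	/* Sketch only: hypothetical names, showing a bounded drain of a queue
	 * under a spinlock instead of an unbounded loop with cond_resched().
	 * DEMO_XMIT_BATCH mirrors the "20" in the hunk above but is an
	 * assumption, not a real SUNRPC constant.
	 */
	#include <linux/list.h>
	#include <linux/spinlock.h>

	#define DEMO_XMIT_BATCH 20

	struct demo_req {
		struct list_head q;
	};

	static void demo_drain(spinlock_t *lock, struct list_head *queue)
	{
		struct demo_req *req;
		int batch = 0;

		spin_lock(lock);
		while (!list_empty(queue)) {
			if (++batch == DEMO_XMIT_BATCH)
				break;	/* leave the rest for a later caller */
			req = list_first_entry(queue, struct demo_req, q);
			list_del_init(&req->q);
			/* ... send req; the real code drops the lock around the send ... */
		}
		spin_unlock(lock);
	}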
@@ -124,7 +124,7 @@ int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
 	if (IS_ERR(frmr))
 		goto out_mr_err;
-	sg = kcalloc(depth, sizeof(*sg), GFP_NOFS);
+	sg = kmalloc_array(depth, sizeof(*sg), GFP_NOFS);
 	if (!sg)
 		goto out_list_err;
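The frwr_mr_init() hunk above swaps the zeroing kcalloc() for kmalloc_array(). kmalloc_array() keeps the multiplication-overflow check but skips the memset, which is unnecessary when the buffer is fully initialized before use; for a scatterlist, sg_init_table() does exactly that. A minimal sketch of the allocate-then-initialize pattern (hypothetical helper, not the xprtrdma code):

	/* Sketch only: hypothetical helper.  sg_init_table() zeroes every entry
	 * and sets the end marker itself, so the zeroing done by kcalloc()
	 * would be repeated work on this path.
	 */
	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	static struct scatterlist *demo_alloc_sg(unsigned int depth, gfp_t gfp)
	{
		struct scatterlist *sg;

		sg = kmalloc_array(depth, sizeof(*sg), gfp);	/* overflow-checked, no memset */
		if (!sg)
			return NULL;
		sg_init_table(sg, depth);	/* clears entries, marks the last one */
		return sg;
	}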
@@ -413,9 +413,6 @@ xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
 	kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
 	snprintf(buf, sizeof(buf), "%4hx", port);
 	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
-	trace_xprtrdma_op_setport(container_of(xprt, struct rpcrdma_xprt,
-				  rx_xprt));
 }
 /**
@@ -586,11 +583,9 @@ xprt_rdma_allocate(struct rpc_task *task)
 	rqst->rq_buffer = rdmab_data(req->rl_sendbuf);
 	rqst->rq_rbuffer = rdmab_data(req->rl_recvbuf);
-	trace_xprtrdma_op_allocate(task, req);
 	return 0;
 out_fail:
-	trace_xprtrdma_op_allocate(task, NULL);
 	return -ENOMEM;
 }
@@ -607,8 +602,6 @@ xprt_rdma_free(struct rpc_task *task)
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
-	trace_xprtrdma_op_free(task, req);
 	if (!list_empty(&req->rl_registered))
 		frwr_unmap_sync(r_xprt, req);
@@ -762,10 +762,7 @@ static int xs_nospace(struct rpc_rqst *req)
 	struct sock *sk = transport->inet;
 	int ret = -EAGAIN;
-	dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
-		req->rq_task->tk_pid,
-		req->rq_slen - transport->xmit.offset,
-		req->rq_slen);
+	trace_rpc_socket_nospace(req, transport);
 	/* Protect against races with write_space */
 	spin_lock(&xprt->transport_lock);