Commit cca5172a authored by YOSHIFUJI Hideaki, committed by David S. Miller

[NET] SUNRPC: Fix whitespace errors.

Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d808ad9a
@@ -213,7 +213,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
rpcauth_gc_credcache(auth, &free);
hlist_for_each_safe(pos, next, &cache->hashtable[nr]) {
struct rpc_cred *entry;
entry = hlist_entry(pos, struct rpc_cred, cr_hash);
if (entry->cr_ops->crmatch(acred, entry, flags)) {
hlist_del(&entry->cr_hash);
cred = entry;
...
@@ -2,7 +2,7 @@
* linux/net/sunrpc/auth_gss/auth_gss.c
*
* RPCSEC_GSS client authentication.
*
* Copyright (c) 2000 The Regents of the University of Michigan.
* All rights reserved.
*
@@ -74,7 +74,7 @@ static struct rpc_credops gss_credops;
* as it is passed to gssd to signal the use of
* machine creds should be part of the shared rpc interface */
#define CA_RUN_AS_MACHINE 0x00000200
/* dump the buffer in `emacs-hexl' style */
#define isprint(c) ((c > 0x1f) && (c < 0x7f))
@@ -607,8 +607,8 @@ gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
}
}
/*
* NOTE: we have the opportunity to use different
* parameters based on the input flavor (which must be a pseudoflavor)
*/
static struct rpc_auth *
@@ -869,7 +869,7 @@ gss_validate(struct rpc_task *task, __be32 *p)
flav = ntohl(*p++);
if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
goto out_bad;
if (flav != RPC_AUTH_GSS)
goto out_bad;
seq = htonl(task->tk_rqstp->rq_seqno);
@@ -925,7 +925,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
*integ_len = htonl(integ_buf.len);
/* guess whether we're in the head or the tail: */
if (snd_buf->page_len || snd_buf->tail[0].iov_len)
iov = snd_buf->tail;
else
iov = snd_buf->head;
@@ -1030,7 +1030,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
/* RPC_SLACK_SPACE should prevent this ever happening: */
BUG_ON(snd_buf->len > snd_buf->buflen);
status = -EIO;
/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
* done anyway, so it's safe to put the request on the wire: */
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
@@ -1079,7 +1079,7 @@ gss_wrap_req(struct rpc_task *task,
status = gss_wrap_req_integ(cred, ctx, encode,
rqstp, p, obj);
break;
case RPC_GSS_SVC_PRIVACY:
status = gss_wrap_req_priv(cred, ctx, encode,
rqstp, p, obj);
break;
@@ -1179,7 +1179,7 @@ gss_unwrap_resp(struct rpc_task *task,
if (status)
goto out;
break;
case RPC_GSS_SVC_PRIVACY:
status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
if (status)
goto out;
@@ -1196,7 +1196,7 @@ gss_unwrap_resp(struct rpc_task *task,
status);
return status;
}
static struct rpc_authops authgss_ops = {
.owner = THIS_MODULE,
.au_flavor = RPC_AUTH_GSS,
...
@@ -11,7 +11,7 @@
/*
* Copyright 1993 by OpenVision Technologies, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software
* and its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appears in all copies and
@@ -21,7 +21,7 @@
* without specific, written prior permission. OpenVision makes no
* representations about the suitability of this software for any
* purpose. It is provided "as is" without express or implied warranty.
*
* OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
@@ -201,7 +201,7 @@ g_verify_token_header(struct xdr_netobj *mech, int *body_size,
return(G_BAD_TOK_HEADER);
if (*buf++ != 0x06)
return(G_BAD_TOK_HEADER);
if ((toksize-=1) < 0)
return(G_BAD_TOK_HEADER);
toid.len = *buf++;
@@ -211,9 +211,9 @@ g_verify_token_header(struct xdr_netobj *mech, int *body_size,
toid.data = buf;
buf+=toid.len;
if (! g_OID_equal(&toid, mech))
ret = G_WRONG_MECH;
/* G_WRONG_MECH is not returned immediately because it's more important
to return G_BAD_TOK_HEADER if the token header is in fact bad */
...
@@ -58,7 +58,7 @@ krb5_encrypt(
int length)
{
u32 ret = -EINVAL;
struct scatterlist sg[1];
u8 local_iv[16] = {0};
struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
@@ -67,7 +67,7 @@ krb5_encrypt(
if (crypto_blkcipher_ivsize(tfm) > 16) {
dprintk("RPC: gss_k5encrypt: tfm iv size to large %d\n",
crypto_blkcipher_ivsize(tfm));
goto out;
}
...
@@ -11,7 +11,7 @@
/*
* Copyright 1993 by OpenVision Technologies, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software
* and its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appears in all copies and
@@ -21,7 +21,7 @@
* without specific, written prior permission. OpenVision makes no
* representations about the suitability of this software for any
* purpose. It is provided "as is" without express or implied warranty.
*
* OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
...
@@ -6,14 +6,14 @@
*
* J. Bruce Fields <bfields@umich.edu>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
...
@@ -59,7 +59,7 @@ asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits)
/* count trailing 0's */
for(i = in->len; i > 0; i--) {
if (*ptr == 0) {
ptr--;
elen--;
} else
@@ -82,7 +82,7 @@ asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits)
/*
* decode_asn1_bitstring()
*
* decode a bitstring into a buffer of the expected length.
* enclen = bit string length
* explen = expected length (define in rfc)
@@ -97,9 +97,9 @@ decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen)
return 1;
}
/*
* SPKMInnerContextToken choice SPKM_MIC asn1 token layout
*
* contextid is always 16 bytes plain data. max asn1 bitstring len = 17.
*
* tokenlen = pos[0] to end of token (max pos[45] with MD5 cksum)
@@ -107,21 +107,21 @@ decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen)
* pos value
* ----------
* [0] a4 SPKM-MIC tag
* [1] ?? innertoken length (max 44)
*
*
* tok_hdr piece of checksum data starts here
*
* the maximum mic-header len = 9 + 17 = 26
* mic-header
* ----------
* [2] 30 SEQUENCE tag
* [3] ?? mic-header length: (max 23) = TokenID + ContextID
*
* TokenID - all fields constant and can be hardcoded
* -------
* [4] 02 Type 2
* [5] 02 Length 2
* [6][7] 01 01 TokenID (SPKM_MIC_TOK)
*
* ContextID - encoded length not constant, calculated
@@ -131,17 +131,17 @@ decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen)
* [10] ?? ctxzbit
* [11] contextid
*
* mic_header piece of checksum data ends here.
*
* int-cksum - encoded length not constant, calculated
* ---------
* [??] 03 Type 3
* [??] ?? encoded length
* [??] ?? md5zbit
* [??] int-cksum (NID_md5 = 16)
*
* maximum SPKM-MIC innercontext token length =
* 10 + encoded contextid_size(17 max) + 2 + encoded
* cksum_size (17 maxfor NID_md5) = 46
*/
@@ -178,8 +178,8 @@ spkm3_mic_header(unsigned char **hdrbuf, unsigned int *hdrlen, unsigned char *ct
/*
* spkm3_mic_innercontext_token()
*
* *tokp points to the beginning of the SPKM_MIC token described
* in rfc 2025, section 3.2.1:
*
* toklen is the inner token length
*/
@@ -209,7 +209,7 @@ spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **ck
/* spkm3 innercontext token preamble */
if ((ptr[0] != 0xa4) || (ptr[2] != 0x30)) {
dprintk("RPC: BAD SPKM ictoken preamble\n");
goto out;
}
@@ -245,9 +245,9 @@ spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **ck
goto out;
/*
* in the current implementation: the optional int-alg is not present
* so the default int-alg (md5) is used the optional snd-seq field is
* also not present
*/
if (*mic_hdrlen != 6 + ctxelen) {
@@ -255,7 +255,7 @@ spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **ck
goto out;
}
/* checksum */
*cksum = (&ptr[10] + ctxelen); /* ctxelen includes ptr[10] */
ret = GSS_S_COMPLETE;
out:
...
@@ -45,7 +45,7 @@
/*
* spkm3_read_token()
*
* only SPKM_MIC_TOK with md5 intg-alg is supported
*/
u32
...
@@ -172,8 +172,8 @@ static struct cache_head *rsi_alloc(void)
}
static void rsi_request(struct cache_detail *cd,
struct cache_head *h,
char **bpp, int *blen)
{
struct rsi *rsii = container_of(h, struct rsi, h);
@@ -184,7 +184,7 @@ static void rsi_request(struct cache_detail *cd,
static int rsi_parse(struct cache_detail *cd,
char *mesg, int mlen)
{
/* context token expiry major minor context token */
char *buf = mesg;
...
@@ -274,7 +274,7 @@ int cache_check(struct cache_detail *detail,
*
* A table is then only scanned if the current time is at least
* the nextcheck time.
*
*/
static LIST_HEAD(cache_list);
@@ -296,16 +296,16 @@ void cache_register(struct cache_detail *cd)
struct proc_dir_entry *p;
cd->proc_ent->owner = cd->owner;
cd->channel_ent = cd->content_ent = NULL;
p = create_proc_entry("flush", S_IFREG|S_IRUSR|S_IWUSR,
cd->proc_ent);
cd->flush_ent = p;
if (p) {
p->proc_fops = &cache_flush_operations;
p->owner = cd->owner;
p->data = cd;
}
if (cd->cache_request || cd->cache_parse) {
p = create_proc_entry("channel", S_IFREG|S_IRUSR|S_IWUSR,
cd->proc_ent);
@@ -316,16 +316,16 @@ void cache_register(struct cache_detail *cd)
p->data = cd;
}
}
if (cd->cache_show) {
p = create_proc_entry("content", S_IFREG|S_IRUSR|S_IWUSR,
cd->proc_ent);
cd->content_ent = p;
if (p) {
p->proc_fops = &content_file_operations;
p->owner = cd->owner;
p->data = cd;
}
}
}
rwlock_init(&cd->hash_lock);
INIT_LIST_HEAD(&cd->queue);
@@ -417,15 +417,15 @@ static int cache_clean(void)
current_index++;
/* find a cleanable entry in the bucket and clean it, or set to next bucket */
if (current_detail && current_index < current_detail->hash_size) {
struct cache_head *ch, **cp;
struct cache_detail *d;
write_lock(&current_detail->hash_lock);
/* Ok, now to clean this strand */
cp = & current_detail->hash_table[current_index];
ch = *cp;
for (; ch; cp= & ch->next, ch= *cp) {
@@ -477,9 +477,9 @@ static void do_cache_clean(struct work_struct *work)
}
/*
* Clean all caches promptly. This just calls cache_clean
* repeatedly until we are sure that every cache has had a chance to
* be fully cleaned
*/
void cache_flush(void)
@@ -508,7 +508,7 @@ void cache_purge(struct cache_detail *detail)
* All deferred requests are stored in a hash table,
* indexed by "struct cache_head *".
* As it may be wasteful to store a whole request
* structure, we allow the request to provide a
* deferred form, which must contain a
* 'struct cache_deferred_req'
* This cache_deferred_req contains a method to allow
@@ -584,7 +584,7 @@ static void cache_revisit_request(struct cache_head *item)
INIT_LIST_HEAD(&pending);
spin_lock(&cache_defer_lock);
lp = cache_defer_hash[hash].next;
if (lp) {
while (lp != &cache_defer_hash[hash]) {
@@ -614,7 +614,7 @@ void cache_clean_deferred(void *owner)
INIT_LIST_HEAD(&pending);
spin_lock(&cache_defer_lock);
list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
if (dreq->owner == owner) {
list_del(&dreq->hash);
@@ -639,7 +639,7 @@ void cache_clean_deferred(void *owner)
* On write, an update request is processed
* Poll works if anything to read, and always allows write
*
* Implemented by linked list of requests. Each open file has
* a ->private that also exists in this list. New request are added
* to the end and may wakeup and preceding readers.
* New readers are added to the head. If, on read, an item is found with
@@ -1059,10 +1059,10 @@ static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h)
* Messages are, like requests, separated into fields by
* spaces and dequotes as \xHEXSTRING or embedded \nnn octal
*
* Message is
* reply cachename expiry key ... content....
*
* key and content are both parsed by cache
*/
#define isodigit(c) (isdigit(c) && c <= '7')
@@ -1132,7 +1132,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
unsigned hash, entry;
struct cache_head *ch;
struct cache_detail *cd = ((struct handle*)m->private)->cd;
read_lock(&cd->hash_lock);
if (!n--)
@@ -1147,7 +1147,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
do {
hash++;
n += 1LL<<32;
} while(hash < cd->hash_size &&
cd->hash_table[hash]==NULL);
if (hash >= cd->hash_size)
return NULL;
...
@@ -410,7 +410,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
rpc_shutdown_client(clnt);
clnt = ERR_PTR(err);
}
out:
return clnt;
}
@@ -431,7 +431,7 @@ static const struct rpc_call_ops rpc_default_ops = {
* sleeps on RPC calls
*/
#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))
static void rpc_save_sigmask(sigset_t *oldset, int intr)
{
unsigned long sigallow = sigmask(SIGKILL);
@@ -474,7 +474,7 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
int status;
/* If this client is slain all further I/O fails */
if (clnt->cl_dead)
return -EIO;
BUG_ON(flags & RPC_TASK_ASYNC);
@@ -515,7 +515,7 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
/* If this client is slain all further I/O fails */
status = -EIO;
if (clnt->cl_dead)
goto out_release;
flags |= RPC_TASK_ASYNC;
@@ -526,7 +526,7 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
goto out_release;
/* Mask signals on GSS_AUTH upcalls */
rpc_task_sigmask(task, &oldset);
rpc_call_setup(task, msg, 0);
@@ -537,7 +537,7 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
else
rpc_put_task(task);
rpc_restore_sigmask(&oldset);
return status;
out_release:
rpc_release_calldata(tk_ops, data);
@@ -749,7 +749,7 @@ call_allocate(struct rpc_task *task)
struct rpc_xprt *xprt = task->tk_xprt;
unsigned int bufsiz;
dprintk("RPC: %4d call_allocate (status %d)\n",
task->tk_pid, task->tk_status);
task->tk_action = call_bind;
if (req->rq_buffer)
@@ -761,7 +761,7 @@ call_allocate(struct rpc_task *task)
if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL)
return;
printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);
if (RPC_IS_ASYNC(task) || !signalled()) {
xprt_release(task);
@@ -798,7 +798,7 @@ call_encode(struct rpc_task *task)
kxdrproc_t encode;
__be32 *p;
dprintk("RPC: %4d call_encode (status %d)\n",
task->tk_pid, task->tk_status);
/* Default buffer setup */
@@ -933,7 +933,7 @@ call_connect_status(struct rpc_task *task)
struct rpc_clnt *clnt = task->tk_client;
int status = task->tk_status;
dprintk("RPC: %5u call_connect_status (status %d)\n",
task->tk_pid, task->tk_status);
task->tk_status = 0;
@@ -966,7 +966,7 @@ call_connect_status(struct rpc_task *task)
static void
call_transmit(struct rpc_task *task)
{
dprintk("RPC: %4d call_transmit (status %d)\n",
task->tk_pid, task->tk_status);
task->tk_action = call_status;
@@ -1028,7 +1028,7 @@ call_status(struct rpc_task *task)
if (req->rq_received > 0 && !req->rq_bytes_sent)
task->tk_status = req->rq_received;
dprintk("RPC: %4d call_status (status %d)\n",
task->tk_pid, task->tk_status);
status = task->tk_status;
@@ -1118,7 +1118,7 @@ call_decode(struct rpc_task *task)
kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode;
__be32 *p;
dprintk("RPC: %4d call_decode (status %d)\n",
task->tk_pid, task->tk_status);
if (task->tk_flags & RPC_CALL_MAJORSEEN) {
@@ -1196,7 +1196,7 @@ static void
call_refreshresult(struct rpc_task *task)
{
int status = task->tk_status;
dprintk("RPC: %4d call_refreshresult (status %d)\n",
task->tk_pid, task->tk_status);
task->tk_status = 0;
...
@@ -329,7 +329,7 @@ static int xdr_decode_bool(struct rpc_rqst *req, __be32 *p, unsigned int *boolp)
static struct rpc_procinfo pmap_procedures[] = {
[PMAP_SET] = {
.p_proc = PMAP_SET,
.p_encode = (kxdrproc_t) xdr_encode_mapping,
.p_decode = (kxdrproc_t) xdr_decode_bool,
.p_bufsiz = 4,
.p_count = 1,
@@ -338,7 +338,7 @@ static struct rpc_procinfo pmap_procedures[] = {
},
[PMAP_UNSET] = {
.p_proc = PMAP_UNSET,
.p_encode = (kxdrproc_t) xdr_encode_mapping,
.p_decode = (kxdrproc_t) xdr_decode_bool,
.p_bufsiz = 4,
.p_count = 1,
...
@@ -4,7 +4,7 @@
* Scheduling for synchronous and asynchronous RPC requests.
*
* Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
*
* TCP NFS related read + write fixes
* (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
*/
@@ -307,7 +307,7 @@ EXPORT_SYMBOL(__rpc_wait_for_completion_task);
/*
* Make an RPC task runnable.
*
* Note: If the task is ASYNC, this must be called with
* the spinlock held to protect the wait queue operation.
*/
static void rpc_make_runnable(struct rpc_task *task)
@@ -646,8 +646,8 @@ static int __rpc_execute(struct rpc_task *task)
if (RPC_DO_CALLBACK(task)) {
/* Define a callback save pointer */
void (*save_callback)(struct rpc_task *);
/*
* If a callback exists, save it, reset it,
* call it.
* The save is needed to stop from resetting
...
@@ -397,7 +397,7 @@ svc_destroy(struct svc_serv *serv)
sk_list);
svc_close_socket(svsk);
}
cache_clean_deferred(serv);
/* Unregister service with the portmapper */
@@ -415,7 +415,7 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
{
int pages;
int arghi;
pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
* We assume one is at most one page
*/
@@ -514,7 +514,7 @@ choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
if (pool != NULL)
return pool;
return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}
/*
@@ -530,13 +530,13 @@ choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
spin_lock_bh(&pool->sp_lock);
} else {
/* choose a pool in round-robin fashion */
for (i = 0; i < serv->sv_nrpools; i++) {
pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
spin_lock_bh(&pool->sp_lock);
if (!list_empty(&pool->sp_all_threads))
goto found_pool;
spin_unlock_bh(&pool->sp_lock);
}
return NULL;
}
@@ -551,7 +551,7 @@ choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
list_del_init(&rqstp->rq_all);
task = rqstp->rq_task;
}
spin_unlock_bh(&pool->sp_lock);
return task;
@@ -636,7 +636,7 @@ svc_exit_thread(struct svc_rqst *rqstp)
/*
* Register an RPC service with the local portmapper.
* To unregister a service, call this routine with
* proto and port == 0.
*/
int
@@ -709,7 +709,7 @@ svc_process(struct svc_rqst *rqstp)
goto err_short_len;
/* setup response xdr_buf.
* Initially it has just one page
*/
rqstp->rq_resused = 1;
resv->iov_base = page_address(rqstp->rq_respages[0]);
@@ -811,7 +811,7 @@ svc_process(struct svc_rqst *rqstp)
memset(rqstp->rq_argp, 0, procp->pc_argsize);
memset(rqstp->rq_resp, 0, procp->pc_ressize);
/* un-reserve some of the out-queue now that we have a
* better idea of reply size
*/
if (procp->pc_xdrressize)
...
@@ -2,7 +2,7 @@
* linux/net/sunrpc/svcauth.c
*
* The generic interface for RPC authentication on the server side.
*
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*
* CHANGES
@@ -74,7 +74,7 @@ int svc_authorise(struct svc_rqst *rqstp)
int rv = 0;
rqstp->rq_authop = NULL;
if (aops) {
rv = aops->release(rqstp);
module_put(aops->owner);
...
@@ -151,7 +151,7 @@ static void ip_map_request(struct cache_detail *cd,
char text_addr[20];
struct ip_map *im = container_of(h, struct ip_map, h);
__be32 addr = im->m_addr.s_addr;
snprintf(text_addr, 20, "%u.%u.%u.%u",
ntohl(addr) >> 24 & 0xff,
ntohl(addr) >> 16 & 0xff,
@@ -198,7 +198,7 @@ static int ip_map_parse(struct cache_detail *cd,
if (sscanf(buf, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) != 4)
return -EINVAL;
expiry = get_expiry(&mesg);
if (expiry ==0)
return -EINVAL;
@@ -248,7 +248,7 @@ static int ip_map_show(struct seq_file *m,
/* class addr domain */
addr = im->m_addr;
if (test_bit(CACHE_VALID, &h->flags) &&
!test_bit(CACHE_NEGATIVE, &h->flags))
dom = im->m_client->h.name;
@@ -262,7 +262,7 @@ static int ip_map_show(struct seq_file *m,
);
return 0;
}
struct cache_detail ip_map_cache = {
.owner = THIS_MODULE,
@@ -343,7 +343,7 @@ int auth_unix_add_addr(struct in_addr addr, struct auth_domain *dom)
int auth_unix_forget_old(struct auth_domain *dom)
{
struct unix_domain *udom;
if (dom->flavour != &svcauth_unix)
return -EINVAL;
udom = container_of(dom, struct unix_domain, h);
@@ -465,7 +465,7 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
if (argv->iov_len < 3*4)
return SVC_GARBAGE;
if (svc_getu32(argv) != 0) {
dprintk("svc: bad null cred\n");
*authp = rpc_autherr_badcred;
return SVC_DENIED;
...
@@ -58,7 +58,7 @@
* providing that certain rules are followed:
*
* SK_CONN, SK_DATA, can be set or cleared at any time.
* after a set, svc_sock_enqueue must be called.
* after a clear, the socket must be read/accepted
* if this succeeds, it must be set again.
* SK_CLOSE can set at any time. It is never cleared.
@@ -252,7 +252,7 @@ svc_sock_enqueue(struct svc_sock *svsk)
svsk->sk_sk, rqstp);
svc_thread_dequeue(pool, rqstp);
if (rqstp->rq_sock)
printk(KERN_ERR
"svc_sock_enqueue: server %p, rq_sock=%p!\n",
rqstp, rqstp->rq_sock);
rqstp->rq_sock = svsk;
@@ -484,7 +484,7 @@ svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
if (xdr->tail[0].iov_len) {
result = kernel_sendpage(sock, rqstp->rq_respages[0],
((unsigned long)xdr->tail[0].iov_base)
& (PAGE_SIZE-1),
xdr->tail[0].iov_len, 0);
if (result > 0)
@@ -711,7 +711,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
tv.tv_sec = xtime.tv_sec;
tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
skb_set_timestamp(skb, &tv);
/* Don't enable netstamp, sunrpc doesn't
need that much accuracy */
}
skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
@@ -743,7 +743,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
return 0;
}
local_bh_enable();
skb_free_datagram(svsk->sk_sk, skb);
} else {
/* we can use it in-place */
rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
@@ -794,7 +794,7 @@ svc_udp_init(struct svc_sock *svsk)
svsk->sk_sendto = svc_udp_sendto;
/* initialise setting must have enough space to
* receive and respond to one request.
* svc_udp_recvfrom will re-adjust if necessary
*/
svc_sock_setbufsize(svsk->sk_sock,
@@ -923,7 +923,7 @@ svc_tcp_accept(struct svc_sock *svsk)
if (ntohs(sin.sin_port) >= 1024) {
dprintk(KERN_WARNING
"%s: connect from unprivileged port: %u.%u.%u.%u:%d\n",
serv->sv_name,
NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
}
@@ -1038,7 +1038,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
* on the number of threads which will access the socket.
*
* rcvbuf just needs to be able to hold a few requests.
* Normally they will be removed from the queue
* as soon a a complete request arrives.
*/
svc_sock_setbufsize(svsk->sk_sock,
@@ -1063,7 +1063,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
if (len < want) {
dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
len, want);
svc_sock_received(svsk);
return -EAGAIN; /* record header not complete */
}
@@ -1221,7 +1221,7 @@ svc_tcp_init(struct svc_sock *svsk)
tp->nonagle = 1; /* disable Nagle's algorithm */
/* initialise setting must have enough space to
* receive and respond to one request.
* svc_tcp_recvfrom will re-adjust if necessary
*/
svc_sock_setbufsize(svsk->sk_sock,
@@ -1230,7 +1230,7 @@ svc_tcp_init(struct svc_sock *svsk)
set_bit(SK_CHNGBUF, &svsk->sk_flags);
set_bit(SK_DATA, &svsk->sk_flags);
if (sk->sk_state != TCP_ESTABLISHED)
set_bit(SK_CLOSE, &svsk->sk_flags);
}
}
@@ -1246,7 +1246,7 @@ svc_sock_update_bufs(struct svc_serv *serv)
spin_lock_bh(&serv->sv_lock);
list_for_each(le, &serv->sv_permsocks) {
struct svc_sock *svsk =
list_entry(le, struct svc_sock, sk_list);
set_bit(SK_CHNGBUF, &svsk->sk_flags);
}
@@ -1278,11 +1278,11 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
rqstp, timeout);
if (rqstp->rq_sock)
printk(KERN_ERR
"svc_recv: service %p, socket not NULL!\n",
rqstp);
if (waitqueue_active(&rqstp->rq_wait))
printk(KERN_ERR
"svc_recv: service %p, wait queue active!\n",
rqstp);
@@ -1371,7 +1371,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
return len;
}
/*
* Drop request
*/
void
@@ -1651,7 +1651,7 @@ svc_delete_socket(struct svc_sock *svsk)
if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
list_del_init(&svsk->sk_list);
/*
* We used to delete the svc_sock from whichever list
* it's sk_ready node was on, but we don't actually
* need to. This is because the only time we're called
@@ -1697,7 +1697,7 @@ svc_makesock(struct svc_serv *serv, int protocol, unsigned short port)
}
/*
* Handle defer and revisit of requests
*/
static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
@@ -1776,7 +1776,7 @@ static int svc_deferred_recv(struct svc_rqst *rqstp)
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
struct svc_deferred_req *dr = NULL;
if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
return NULL;
spin_lock_bh(&svsk->sk_defer_lock);
...
@@ -42,7 +42,7 @@ rpc_register_sysctl(void)
sunrpc_table[0].de->owner = THIS_MODULE;
#endif
}
}
void
@@ -126,7 +126,7 @@ static ctl_table debug_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dodebug
},
{
.ctl_name = CTL_NFSDEBUG,
.procname = "nfs_debug",
@@ -134,7 +134,7 @@ static ctl_table debug_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dodebug
},
{
.ctl_name = CTL_NFSDDEBUG,
.procname = "nfsd_debug",
@@ -142,7 +142,7 @@ static ctl_table debug_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dodebug
},
{
.ctl_name = CTL_NLMDEBUG,
.procname = "nlm_debug",
@@ -150,7 +150,7 @@ static ctl_table debug_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dodebug
},
{ .ctl_name = 0 }
};
...
@@ -302,7 +302,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
* @buf: xdr_buf
* @len: bytes to remove from buf->head[0]
*
* Shrinks XDR buffer's header kvec buf->head[0] by
* 'len' bytes. The extra data is not lost, but is instead
* moved into the inlined pages and/or the tail.
*/
@@ -375,7 +375,7 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
* @buf: xdr_buf
* @len: bytes to remove from buf->pages
*
* Shrinks XDR buffer's page array buf->pages by
* 'len' bytes. The extra data is not lost, but is instead
* moved into the tail.
*/
@@ -1024,7 +1024,7 @@ xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
int (*actor)(struct scatterlist *, void *), void *data)
{
int i, ret = 0;
unsigned page_len, thislen, page_offset;
...
@@ -410,7 +410,7 @@ void xprt_set_retrans_timeout_def(struct rpc_task *task)
/*
* xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
* @task: task whose timeout is to be set
*
* Set a request's retransmit timeout using the RTT estimator.
*/
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
@@ -873,7 +873,7 @@ void xprt_release(struct rpc_task *task)
*/
void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr)
{
to->to_initval =
to->to_increment = incr;
to->to_maxval = to->to_initval + (incr * retr);
to->to_retries = retr;
...