Commit 571ed1fd authored by Trond Myklebust's avatar Trond Myklebust

SUNRPC: Replace krb5_seq_lock with a lockless scheme

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
parent 0c1c19f4
...@@ -118,7 +118,8 @@ struct krb5_ctx { ...@@ -118,7 +118,8 @@ struct krb5_ctx {
u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN]; u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN];
}; };
extern spinlock_t krb5_seq_lock; extern u32 gss_seq_send_fetch_and_inc(struct krb5_ctx *ctx);
extern u64 gss_seq_send64_fetch_and_inc(struct krb5_ctx *ctx);
/* The length of the Kerberos GSS token header */ /* The length of the Kerberos GSS token header */
#define GSS_KRB5_TOK_HDR_LEN (16) #define GSS_KRB5_TOK_HDR_LEN (16)
......
...@@ -68,8 +68,6 @@ ...@@ -68,8 +68,6 @@
# define RPCDBG_FACILITY RPCDBG_AUTH # define RPCDBG_FACILITY RPCDBG_AUTH
#endif #endif
DEFINE_SPINLOCK(krb5_seq_lock);
static void * static void *
setup_token(struct krb5_ctx *ctx, struct xdr_netobj *token) setup_token(struct krb5_ctx *ctx, struct xdr_netobj *token)
{ {
...@@ -124,6 +122,30 @@ setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token) ...@@ -124,6 +122,30 @@ setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token)
return krb5_hdr; return krb5_hdr;
} }
/*
 * gss_seq_send_fetch_and_inc - atomically post-increment the GSSv1
 * 32-bit send sequence number, returning the pre-increment value.
 *
 * Lockless replacement for the old krb5_seq_lock: snapshot the counter,
 * then retry cmpxchg() until no other CPU raced with the update.
 */
u32
gss_seq_send_fetch_and_inc(struct krb5_ctx *ctx)
{
	u32 prev = READ_ONCE(ctx->seq_send);
	u32 cur;

	for (;;) {
		/* cmpxchg() returns the value actually found in memory;
		 * equality with our snapshot means the increment landed. */
		cur = cmpxchg(&ctx->seq_send, prev, prev + 1);
		if (cur == prev)
			return prev;
		prev = cur;
	}
}
/*
 * gss_seq_send64_fetch_and_inc - atomically post-increment the GSSv2
 * 64-bit send sequence number, returning the pre-increment value.
 *
 * Lockless replacement for the old krb5_seq_lock: snapshot the counter,
 * then retry the compare-and-exchange until no other CPU raced with us.
 */
u64
gss_seq_send64_fetch_and_inc(struct krb5_ctx *ctx)
{
	/* Fix: snapshot seq_send64, not the unrelated 32-bit seq_send
	 * field, so the first cmpxchg attempt starts from the right
	 * value instead of always failing one extra round trip. */
	u64 old, seq_send = READ_ONCE(ctx->seq_send64);

	do {
		old = seq_send;
		/* Fix: cmpxchg64() is required for a 64-bit operand;
		 * plain cmpxchg() breaks on 32-bit architectures. */
		seq_send = cmpxchg64(&ctx->seq_send64, old, old + 1);
	} while (old != seq_send);
	return seq_send;
}
static u32 static u32
gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text, gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
struct xdr_netobj *token) struct xdr_netobj *token)
...@@ -154,9 +176,7 @@ gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text, ...@@ -154,9 +176,7 @@ gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len); memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
spin_lock(&krb5_seq_lock); seq_send = gss_seq_send_fetch_and_inc(ctx);
seq_send = ctx->seq_send++;
spin_unlock(&krb5_seq_lock);
if (krb5_make_seq_num(ctx, ctx->seq, ctx->initiate ? 0 : 0xff, if (krb5_make_seq_num(ctx, ctx->seq, ctx->initiate ? 0 : 0xff,
seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)) seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))
...@@ -174,7 +194,6 @@ gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text, ...@@ -174,7 +194,6 @@ gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,
.data = cksumdata}; .data = cksumdata};
void *krb5_hdr; void *krb5_hdr;
s32 now; s32 now;
u64 seq_send;
u8 *cksumkey; u8 *cksumkey;
unsigned int cksum_usage; unsigned int cksum_usage;
__be64 seq_send_be64; __be64 seq_send_be64;
...@@ -185,11 +204,7 @@ gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text, ...@@ -185,11 +204,7 @@ gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,
/* Set up the sequence number. Now 64-bits in clear /* Set up the sequence number. Now 64-bits in clear
* text and w/o direction indicator */ * text and w/o direction indicator */
spin_lock(&krb5_seq_lock); seq_send_be64 = cpu_to_be64(gss_seq_send64_fetch_and_inc(ctx));
seq_send = ctx->seq_send64++;
spin_unlock(&krb5_seq_lock);
seq_send_be64 = cpu_to_be64(seq_send);
memcpy(krb5_hdr + 8, (char *) &seq_send_be64, 8); memcpy(krb5_hdr + 8, (char *) &seq_send_be64, 8);
if (ctx->initiate) { if (ctx->initiate) {
......
...@@ -228,9 +228,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, ...@@ -228,9 +228,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len); memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
spin_lock(&krb5_seq_lock); seq_send = gss_seq_send_fetch_and_inc(kctx);
seq_send = kctx->seq_send++;
spin_unlock(&krb5_seq_lock);
/* XXX would probably be more efficient to compute checksum /* XXX would probably be more efficient to compute checksum
* and encrypt at the same time: */ * and encrypt at the same time: */
...@@ -477,9 +475,7 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset, ...@@ -477,9 +475,7 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
*be16ptr++ = 0; *be16ptr++ = 0;
be64ptr = (__be64 *)be16ptr; be64ptr = (__be64 *)be16ptr;
spin_lock(&krb5_seq_lock); *be64ptr = cpu_to_be64(gss_seq_send64_fetch_and_inc(kctx));
*be64ptr = cpu_to_be64(kctx->seq_send64++);
spin_unlock(&krb5_seq_lock);
err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages); err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages);
if (err) if (err)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.