Commit 31c9590a authored by Chuck Lever

SUNRPC: Add "@len" parameter to gss_unwrap()

Refactor: This is a prerequisite to fixing the client-side ralign
computation in gss_unwrap_resp_priv().

The length value is passed in explicitly rather than as the value
of buf->len. This will subsequently allow gss_unwrap_kerberos_v1()
to compute a slack and align value, instead of computing it in
gss_unwrap_resp_priv().

Fixes: 35e77d21 ("SUNRPC: Add rpc_auth::au_ralign field")
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
parent 6a8b55ed
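Before the diff, it may help to see the calling convention being replaced. Previously, gss_unwrap() learned the end of the wrap token implicitly from buf->len, so callers had to clamp buf->len before the call and repair it afterward (the save/restore dance this patch deletes from unwrap_priv_data() below). The following is a minimal sketch in plain C contrasting the two conventions; it is not kernel code, and the demo_* names and the one-field stand-in struct are invented for illustration.

#include <stddef.h>

/* Hypothetical stand-in for struct xdr_buf; just enough for the sketch. */
struct demo_xdr_buf {
	size_t len;	/* bytes currently considered part of the message */
};

/* Stub callees: each decrypts the wrap token and leaves buf->len at the
 * resulting cleartext length. The old one reads the token boundary from
 * buf->len; the new one takes it as an explicit @len parameter. */
static unsigned int demo_unwrap_old(struct demo_xdr_buf *buf, size_t offset)
{
	buf->len = buf->len - offset;	/* illustrative only */
	return 0;
}

static unsigned int demo_unwrap_new(struct demo_xdr_buf *buf, size_t offset,
				    size_t len)
{
	buf->len = len - offset;	/* illustrative only */
	return 0;
}

/* Old convention, caller side: clamp buf->len to the end of the wrap
 * token, call, then repair the overall buffer length afterward. */
static unsigned int demo_call_old(struct demo_xdr_buf *buf, size_t priv_len)
{
	size_t saved_len = buf->len;
	unsigned int maj;
	size_t pad;

	buf->len = priv_len;		/* smuggle the token length in */
	maj = demo_unwrap_old(buf, 0);
	pad = priv_len - buf->len;	/* how much unwrapping removed */
	buf->len = saved_len - pad;	/* repair the overall length */
	return maj;
}

/* New convention, caller side: the token boundary is simply an argument,
 * so buf->len is never touched before the call. */
static unsigned int demo_call_new(struct demo_xdr_buf *buf, size_t priv_len)
{
	return demo_unwrap_new(buf, 0, priv_len);
}

Because the callee now sees the token boundary directly, gss_unwrap_kerberos_v1() and gss_unwrap_kerberos_v2() can also derive slack and alignment values on their own, which is what the planned fix to gss_unwrap_resp_priv() relies on.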
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -66,6 +66,7 @@ u32 gss_wrap(
 u32 gss_unwrap(
 		struct gss_ctx		*ctx_id,
 		int			offset,
+		int			len,
 		struct xdr_buf		*inbuf);
 u32 gss_delete_sec_context(
 		struct gss_ctx		**ctx_id);
@@ -126,6 +127,7 @@ struct gss_api_ops {
 	u32 (*gss_unwrap)(
 			struct gss_ctx		*ctx_id,
 			int			offset,
+			int			len,
 			struct xdr_buf		*buf);
 	void (*gss_delete_sec_context)(
 			void			*internal_ctx_id);
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -83,7 +83,7 @@ struct gss_krb5_enctype {
 	u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset,
 			   struct xdr_buf *buf,
 			   struct page **pages); /* v2 encryption function */
-	u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset,
+	u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, u32 len,
 			   struct xdr_buf *buf, u32 *headskip,
 			   u32 *tailskip); /* v2 decryption function */
 };
@@ -255,7 +255,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset,
 		struct xdr_buf *outbuf, struct page **pages);

 u32
-gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
+gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, int len,
 		struct xdr_buf *buf);
@@ -312,7 +312,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
 		struct page **pages);

 u32
-gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
+gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
 		struct xdr_buf *buf, u32 *plainoffset,
 		u32 *plainlen);
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -2043,9 +2043,9 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
 	offset = (u8 *)(p) - (u8 *)head->iov_base;
 	if (offset + opaque_len > rcv_buf->len)
 		goto unwrap_failed;
-	rcv_buf->len = offset + opaque_len;
-	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
+	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset,
+			      offset + opaque_len, rcv_buf);
 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
 		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
 	if (maj_stat != GSS_S_COMPLETE)
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -851,8 +851,8 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
 }

 u32
-gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
-		     u32 *headskip, u32 *tailskip)
+gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
+		     struct xdr_buf *buf, u32 *headskip, u32 *tailskip)
 {
 	struct xdr_buf subbuf;
 	u32 ret = 0;
@@ -881,7 +881,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
 	/* create a segment skipping the header and leaving out the checksum */
 	xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
-			   (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
+			   (len - offset - GSS_KRB5_TOK_HDR_LEN -
 			    kctx->gk5e->cksumlength));

 	nblocks = (subbuf.len + blocksize - 1) / blocksize;
@@ -926,7 +926,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
 		goto out_err;

 	/* Get the packet's hmac value */
-	ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
+	ret = read_bytes_from_xdr_buf(buf, len - kctx->gk5e->cksumlength,
 				      pkt_hmac, kctx->gk5e->cksumlength);
 	if (ret)
 		goto out_err;
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -261,7 +261,8 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
 }

 static u32
-gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
+gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len,
+		       struct xdr_buf *buf)
 {
 	int			signalg;
 	int			sealalg;
@@ -284,7 +285,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 	ptr = (u8 *)buf->head[0].iov_base + offset;

 	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
-					buf->len - offset))
+					len - offset))
 		return GSS_S_DEFECTIVE_TOKEN;

 	if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
@@ -324,6 +325,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 	    (!kctx->initiate && direction != 0))
 		return GSS_S_BAD_SIG;

+	buf->len = len;
 	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
 		struct crypto_sync_skcipher *cipher;
 		int err;
@@ -376,7 +378,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
 	memmove(orig_start, data_start, data_len);
 	buf->head[0].iov_len -= (data_start - orig_start);
-	buf->len -= (data_start - orig_start);
+	buf->len = len - (data_start - orig_start);

 	if (gss_krb5_remove_padding(buf, blocksize))
 		return GSS_S_DEFECTIVE_TOKEN;
@@ -486,7 +488,8 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
 }

 static u32
-gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
+gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
+		       struct xdr_buf *buf)
 {
 	time64_t	now;
 	u8		*ptr;
@@ -532,7 +535,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 	if (rrc != 0)
 		rotate_left(offset + 16, buf, rrc);

-	err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
+	err = (*kctx->gk5e->decrypt_v2)(kctx, offset, len, buf,
 					&headskip, &tailskip);
 	if (err)
 		return GSS_S_FAILURE;
@@ -542,7 +545,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 	 * it against the original
 	 */
 	err = read_bytes_from_xdr_buf(buf,
-				buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
+				len - GSS_KRB5_TOK_HDR_LEN - tailskip,
 				decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
 	if (err) {
 		dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
@@ -568,14 +571,14 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 	 * Note that buf->head[0].iov_len may indicate the available
 	 * head buffer space rather than that actually occupied.
 	 */
-	movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
+	movelen = min_t(unsigned int, buf->head[0].iov_len, len);
 	movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
 	if (offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
 	    buf->head[0].iov_len)
 		return GSS_S_FAILURE;
 	memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
 	buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
-	buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;
+	buf->len = len - GSS_KRB5_TOK_HDR_LEN + headskip;

 	/* Trim off the trailing "extra count" and checksum blob */
 	buf->len -= ec + GSS_KRB5_TOK_HDR_LEN + tailskip;
@@ -603,7 +606,8 @@ gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
 }

 u32
-gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
+gss_unwrap_kerberos(struct gss_ctx *gctx, int offset,
+		    int len, struct xdr_buf *buf)
 {
 	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

@@ -613,9 +617,9 @@ gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
 	case ENCTYPE_DES_CBC_RAW:
 	case ENCTYPE_DES3_CBC_RAW:
 	case ENCTYPE_ARCFOUR_HMAC:
-		return gss_unwrap_kerberos_v1(kctx, offset, buf);
+		return gss_unwrap_kerberos_v1(kctx, offset, len, buf);
 	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
 	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
-		return gss_unwrap_kerberos_v2(kctx, offset, buf);
+		return gss_unwrap_kerberos_v2(kctx, offset, len, buf);
 	}
 }
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -411,10 +411,11 @@ gss_wrap(struct gss_ctx *ctx_id,
 u32
 gss_unwrap(struct gss_ctx	*ctx_id,
 	   int			offset,
+	   int			len,
 	   struct xdr_buf	*buf)
 {
 	return ctx_id->mech_type->gm_ops
-		->gss_unwrap(ctx_id, offset, buf);
+		->gss_unwrap(ctx_id, offset, len, buf);
 }
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -934,7 +934,7 @@ static int
 unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
 {
 	u32 priv_len, maj_stat;
-	int pad, saved_len, remaining_len, offset;
+	int pad, remaining_len, offset;

 	clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
@@ -954,12 +954,8 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
 	buf->len -= pad;
 	fix_priv_head(buf, pad);

-	/* Maybe it would be better to give gss_unwrap a length parameter: */
-	saved_len = buf->len;
-	buf->len = priv_len;
-	maj_stat = gss_unwrap(ctx, 0, buf);
+	maj_stat = gss_unwrap(ctx, 0, priv_len, buf);
 	pad = priv_len - buf->len;
-	buf->len = saved_len;
 	buf->len -= pad;

 	/* The upper layers assume the buffer is aligned on 4-byte boundaries.
 	 * In the krb5p case, at least, the data ends up offset, so we need to