[PATCH 5/7] cipher/bulkhelp: add functions for CTR/CBC/CFB/OCB bulk processing
* cipher/bulkhelp.h (bulk_crypt_fn_t, bulk_ctr_enc_128)
(bulk_cbc_dec_128, bulk_cfb_dec_128, bulk_ocb_crypt_128)
(bulk_ocb_auth_128): New.
* cipher/sm4.c (_gcry_sm4_ctr_enc, _gcry_sm4_cbc_dec)
(_gcry_sm4_cfb_dec, _gcry_sm4_ocb_crypt, _gcry_sm4_ocb_auth): Switch
to use helper functions from 'bulkhelp.h'.
--

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
---
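For reviewers: the helpers only hoist the common chunking loop; each
implementation keeps its own temporary buffer and stack-burn handling.
A minimal sketch of the intended call site (hypothetical cipher --
'foo_get_crypt_blk1_8_fn' and the surrounding variables are
illustrative; the real conversion for SM4 is in the diff below):

  if (nblocks)
    {
      crypt_blk1_8_fn_t crypt_fn = foo_get_crypt_blk1_8_fn (ctx);
      byte tmpbuf[16 * 8];         /* room for up to 8 blocks */
      unsigned int tmp_used = 16;  /* bytes of tmpbuf actually used */
      size_t nburn;

      /* The helper splits nblocks into chunks of at most
         sizeof(tmpbuf) / 16 blocks and calls crypt_fn on each chunk.  */
      nburn = bulk_ctr_enc_128 (ctx->rkey_enc, crypt_fn, outbuf, inbuf,
                                nblocks, ctr, tmpbuf, sizeof(tmpbuf) / 16,
                                &tmp_used);
      burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth;

      wipememory (tmpbuf, tmp_used);  /* wipe only what was used */
    }
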
 cipher/bulkhelp.h | 225 ++++++++++++++++++++++++++++++++++++++++++++++
 cipher/sm4.c      | 184 ++++++-----------------------------
 2 files changed, 260 insertions(+), 149 deletions(-)

diff --git a/cipher/bulkhelp.h b/cipher/bulkhelp.h
index 72668d42..c9ecaba6 100644
--- a/cipher/bulkhelp.h
+++ b/cipher/bulkhelp.h
@@ -32,6 +32,10 @@ typedef u64 ocb_L_uintptr_t;
 typedef uintptr_t ocb_L_uintptr_t;
 #endif
 
+typedef unsigned int (*bulk_crypt_fn_t) (const void *ctx, byte *out,
+                                         const byte *in,
+                                         unsigned int num_blks);
+
 
 static inline ocb_L_uintptr_t *
 bulk_ocb_prepare_L_pointers_array_blk32 (gcry_cipher_hd_t c,
@@ -100,4 +104,225 @@ bulk_ocb_prepare_L_pointers_array_blk8 (gcry_cipher_hd_t c,
 }
 
 
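+/* Bulk CTR encryption: process NBLOCKS 16-byte blocks from INBUF to OUTBUF,
+   passing at most TMPBUF_NBLOCKS blocks per CRYPT_FN call through TMPBUF;
+   CTR is advanced by NBLOCKS.  On return, *NUM_USED_TMPBLOCKS holds the
+   number of bytes of TMPBUF that were used (for the caller's wipememory);
+   the return value is the largest stack burn depth reported by CRYPT_FN.  */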
+static inline unsigned int
+bulk_ctr_enc_128 (void *priv, bulk_crypt_fn_t crypt_fn, byte *outbuf,
+                  const byte *inbuf, size_t nblocks, byte *ctr,
+                  byte *tmpbuf, size_t tmpbuf_nblocks,
+                  unsigned int *num_used_tmpblocks)
+{
+  unsigned int tmp_used = 16;
+  unsigned int burn_depth = 0;
+  unsigned int nburn;
+
+  while (nblocks >= 1)
+    {
+      size_t curr_blks = nblocks > tmpbuf_nblocks ? tmpbuf_nblocks : nblocks;
+      size_t i;
+
+      if (curr_blks * 16 > tmp_used)
+        tmp_used = curr_blks * 16;
+
+      cipher_block_cpy (tmpbuf + 0 * 16, ctr, 16);
+      for (i = 1; i < curr_blks; i++)
+        {
+          cipher_block_cpy (&tmpbuf[i * 16], ctr, 16);
+          cipher_block_add (&tmpbuf[i * 16], i, 16);
+        }
+      cipher_block_add (ctr, curr_blks, 16);
+
+      nburn = crypt_fn (priv, tmpbuf, tmpbuf, curr_blks);
+      burn_depth = nburn > burn_depth ? nburn : burn_depth;
+
+      for (i = 0; i < curr_blks; i++)
+        {
+          cipher_block_xor (outbuf, &tmpbuf[i * 16], inbuf, 16);
+          outbuf += 16;
+          inbuf += 16;
+        }
+
+      nblocks -= curr_blks;
+    }
+
+  *num_used_tmpblocks = tmp_used;
+  return burn_depth;
+}
+
+
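+/* Bulk CBC decryption: CRYPT_FN must be the block-decryption function.
+   IV is updated to the last ciphertext block.  Same TMPBUF and burn-depth
+   conventions as bulk_ctr_enc_128 above.  */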
+static inline unsigned int
+bulk_cbc_dec_128 (void *priv, bulk_crypt_fn_t crypt_fn, byte *outbuf,
+                  const byte *inbuf, size_t nblocks, byte *iv,
+                  byte *tmpbuf, size_t tmpbuf_nblocks,
+                  unsigned int *num_used_tmpblocks)
+{
+  unsigned int tmp_used = 16;
+  unsigned int burn_depth = 0;
+  unsigned int nburn;
+
+  while (nblocks >= 1)
+    {
+      size_t curr_blks = nblocks > tmpbuf_nblocks ? tmpbuf_nblocks : nblocks;
+      size_t i;
+
+      if (curr_blks * 16 > tmp_used)
+        tmp_used = curr_blks * 16;
+
+      nburn = crypt_fn (priv, tmpbuf, inbuf, curr_blks);
+      burn_depth = nburn > burn_depth ? nburn : burn_depth;
+
+      for (i = 0; i < curr_blks; i++)
+        {
+          cipher_block_xor_n_copy_2(outbuf, &tmpbuf[i * 16], iv, inbuf, 16);
+          outbuf += 16;
+          inbuf += 16;
+        }
+
+      nblocks -= curr_blks;
+    }
+
+  *num_used_tmpblocks = tmp_used;
+  return burn_depth;
+}
+
+
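+/* Bulk CFB decryption: CRYPT_FN must be the block-encryption function.
+   IV is updated to the last ciphertext block.  Same TMPBUF and burn-depth
+   conventions as bulk_ctr_enc_128 above.  */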
+static inline unsigned int
+bulk_cfb_dec_128 (void *priv, bulk_crypt_fn_t crypt_fn, byte *outbuf,
+                  const byte *inbuf, size_t nblocks, byte *iv,
+                  byte *tmpbuf, size_t tmpbuf_nblocks,
+                  unsigned int *num_used_tmpblocks)
+{
+  unsigned int tmp_used = 16;
+  unsigned int burn_depth = 0;
+  unsigned int nburn;
+
+  while (nblocks >= 1)
+    {
+      size_t curr_blks = nblocks > tmpbuf_nblocks ? tmpbuf_nblocks : nblocks;
+      size_t i;
+
+      if (curr_blks * 16 > tmp_used)
+        tmp_used = curr_blks * 16;
+
+      cipher_block_cpy (&tmpbuf[0 * 16], iv, 16);
+      if (curr_blks > 1)
+        memcpy (&tmpbuf[1 * 16], &inbuf[(1 - 1) * 16], 16 * curr_blks - 16);
+      cipher_block_cpy (iv, &inbuf[(curr_blks - 1) * 16], 16);
+
+      nburn = crypt_fn (priv, tmpbuf, tmpbuf, curr_blks);
+      burn_depth = nburn > burn_depth ? nburn : burn_depth;
+
+      for (i = 0; i < curr_blks; i++)
+        {
+          cipher_block_xor (outbuf, inbuf, &tmpbuf[i * 16], 16);
+          outbuf += 16;
+          inbuf += 16;
+        }
+
+      nblocks -= curr_blks;
+    }
+
+  *num_used_tmpblocks = tmp_used;
+  return burn_depth;
+}
+
+
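+/* Bulk OCB en-/decryption: CRYPT_FN must match the direction selected by
+   ENCRYPT.  Updates the offset in c->u_iv.iv, the checksum in c->u_ctr.ctr
+   and the block counter at *BLKN.  Same TMPBUF and burn-depth conventions
+   as bulk_ctr_enc_128 above.  */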
+static inline unsigned int
+bulk_ocb_crypt_128 (gcry_cipher_hd_t c, void *priv, bulk_crypt_fn_t crypt_fn,
+                    byte *outbuf, const byte *inbuf, size_t nblocks, u64 *blkn,
+                    int encrypt, byte *tmpbuf, size_t tmpbuf_nblocks,
+                    unsigned int *num_used_tmpblocks)
+{
+  unsigned int tmp_used = 16;
+  unsigned int burn_depth = 0;
+  unsigned int nburn;
+
+  while (nblocks >= 1)
+    {
+      size_t curr_blks = nblocks > tmpbuf_nblocks ? tmpbuf_nblocks : nblocks;
+      size_t i;
+
+      if (curr_blks * 16 > tmp_used)
+        tmp_used = curr_blks * 16;
+
+      for (i = 0; i < curr_blks; i++)
+        {
+          const unsigned char *l = ocb_get_l(c, ++*blkn);
+
+          /* Checksum_i = Checksum_{i-1} xor P_i */
+          if (encrypt)
+            cipher_block_xor_1(c->u_ctr.ctr, &inbuf[i * 16], 16);
+
+          /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
+          cipher_block_xor_2dst (&tmpbuf[i * 16], c->u_iv.iv, l, 16);
+          cipher_block_xor (&outbuf[i * 16], &inbuf[i * 16],
+                            c->u_iv.iv, 16);
+        }
+
+      /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
+      nburn = crypt_fn (priv, outbuf, outbuf, curr_blks);
+      burn_depth = nburn > burn_depth ? nburn : burn_depth;
+
+      for (i = 0; i < curr_blks; i++)
+        {
+          cipher_block_xor_1 (&outbuf[i * 16], &tmpbuf[i * 16], 16);
+
+          /* Checksum_i = Checksum_{i-1} xor P_i */
+          if (!encrypt)
+            cipher_block_xor_1(c->u_ctr.ctr, &outbuf[i * 16], 16);
+        }
+
+      outbuf += curr_blks * 16;
+      inbuf += curr_blks * 16;
+      nblocks -= curr_blks;
+    }
+
+  *num_used_tmpblocks = tmp_used;
+  return burn_depth;
+}
+
+
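+/* Bulk OCB authentication of NBLOCKS blocks of AAD from ABUF: updates
+   c->u_mode.ocb.aad_offset, c->u_mode.ocb.aad_sum and the block counter
+   at *BLKN.  Same TMPBUF and burn-depth conventions as above.  */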
+static inline unsigned int
+bulk_ocb_auth_128 (gcry_cipher_hd_t c, void *priv, bulk_crypt_fn_t crypt_fn,
+                   const byte *abuf, size_t nblocks, u64 *blkn, byte *tmpbuf,
+                   size_t tmpbuf_nblocks, unsigned int *num_used_tmpblocks)
+{
+  unsigned int tmp_used = 16;
+  unsigned int burn_depth = 0;
+  unsigned int nburn;
+
+  while (nblocks >= 1)
+    {
+      size_t curr_blks = nblocks > tmpbuf_nblocks ? tmpbuf_nblocks : nblocks;
+      size_t i;
+
+      if (curr_blks * 16 > tmp_used)
+        tmp_used = curr_blks * 16;
+
+      for (i = 0; i < curr_blks; i++)
+        {
+          const unsigned char *l = ocb_get_l(c, ++*blkn);
+
+          /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
+          cipher_block_xor_2dst (&tmpbuf[i * 16],
+                                 c->u_mode.ocb.aad_offset, l, 16);
+          cipher_block_xor_1 (&tmpbuf[i * 16], &abuf[i * 16], 16);
+        }
+
+      /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */
+      nburn = crypt_fn (priv, tmpbuf, tmpbuf, curr_blks);
+      burn_depth = nburn > burn_depth ? nburn : burn_depth;
+
+      for (i = 0; i < curr_blks; i++)
+        {
+          cipher_block_xor_1 (c->u_mode.ocb.aad_sum, &tmpbuf[i * 16], 16);
+        }
+
+      abuf += curr_blks * 16;
+      nblocks -= curr_blks;
+    }
+
+  *num_used_tmpblocks = tmp_used;
+  return burn_depth;
+}
+
+
 #endif /*GCRYPT_BULKHELP_H*/
diff --git a/cipher/sm4.c b/cipher/sm4.c
index 0148365c..4815b184 100644
--- a/cipher/sm4.c
+++ b/cipher/sm4.c
@@ -748,36 +748,12 @@ _gcry_sm4_ctr_enc(void *context, unsigned char *ctr,
       crypt_blk1_8_fn_t crypt_blk1_8 = sm4_get_crypt_blk1_8_fn(ctx);
       byte tmpbuf[16 * 8];
       unsigned int tmp_used = 16;
+      size_t nburn;
 
-      /* Process remaining blocks. */
-      while (nblocks)
-        {
-          size_t curr_blks = nblocks > 8 ? 8 : nblocks;
-          size_t i;
-
-          if (curr_blks * 16 > tmp_used)
-            tmp_used = curr_blks * 16;
-
-          cipher_block_cpy (tmpbuf + 0 * 16, ctr, 16);
-          for (i = 1; i < curr_blks; i++)
-            {
-              cipher_block_cpy (&tmpbuf[i * 16], ctr, 16);
-              cipher_block_add (&tmpbuf[i * 16], i, 16);
-            }
-          cipher_block_add (ctr, curr_blks, 16);
-
-          burn_stack_depth = crypt_blk1_8 (ctx->rkey_enc, tmpbuf, tmpbuf,
-                                           curr_blks);
-
-          for (i = 0; i < curr_blks; i++)
-            {
-              cipher_block_xor (outbuf, &tmpbuf[i * 16], inbuf, 16);
-              outbuf += 16;
-              inbuf += 16;
-            }
-
-          nblocks -= curr_blks;
-        }
+      nburn = bulk_ctr_enc_128(ctx->rkey_enc, crypt_blk1_8, outbuf, inbuf,
+                               nblocks, ctr, tmpbuf, sizeof(tmpbuf) / 16,
+                               &tmp_used);
+      burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth;
 
       wipememory(tmpbuf, tmp_used);
     }
@@ -866,33 +842,16 @@ _gcry_sm4_cbc_dec(void *context, unsigned char *iv,
   if (nblocks)
     {
       crypt_blk1_8_fn_t crypt_blk1_8 = sm4_get_crypt_blk1_8_fn(ctx);
-      unsigned char savebuf[16 * 8];
+      unsigned char tmpbuf[16 * 8];
       unsigned int tmp_used = 16;
+      size_t nburn;
 
-      /* Process remaining blocks. */
-      while (nblocks)
-        {
-          size_t curr_blks = nblocks > 8 ? 8 : nblocks;
-          size_t i;
-
-          if (curr_blks * 16 > tmp_used)
-            tmp_used = curr_blks * 16;
-
-          burn_stack_depth = crypt_blk1_8 (ctx->rkey_dec, savebuf, inbuf,
-                                           curr_blks);
+      nburn = bulk_cbc_dec_128(ctx->rkey_dec, crypt_blk1_8, outbuf, inbuf,
+                               nblocks, iv, tmpbuf, sizeof(tmpbuf) / 16,
+                               &tmp_used);
+      burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth;
 
-          for (i = 0; i < curr_blks; i++)
-            {
-              cipher_block_xor_n_copy_2(outbuf, &savebuf[i * 16], iv, inbuf,
-                                        16);
-              outbuf += 16;
-              inbuf += 16;
-            }
-
-          nblocks -= curr_blks;
-        }
-
-      wipememory(savebuf, tmp_used);
+      wipememory(tmpbuf, tmp_used);
     }
 
   if (burn_stack_depth)
@@ -979,37 +938,16 @@ _gcry_sm4_cfb_dec(void *context, unsigned char *iv,
   if (nblocks)
     {
       crypt_blk1_8_fn_t crypt_blk1_8 = sm4_get_crypt_blk1_8_fn(ctx);
-      unsigned char ivbuf[16 * 8];
+      unsigned char tmpbuf[16 * 8];
       unsigned int tmp_used = 16;
+      size_t nburn;
 
-      /* Process remaining blocks. */
-      while (nblocks)
-        {
-          size_t curr_blks = nblocks > 8 ? 8 : nblocks;
-          size_t i;
-
-          if (curr_blks * 16 > tmp_used)
-            tmp_used = curr_blks * 16;
-
-          cipher_block_cpy (&ivbuf[0 * 16], iv, 16);
-          for (i = 1; i < curr_blks; i++)
-            cipher_block_cpy (&ivbuf[i * 16], &inbuf[(i - 1) * 16], 16);
-          cipher_block_cpy (iv, &inbuf[(i - 1) * 16], 16);
+      nburn = bulk_cfb_dec_128(ctx->rkey_enc, crypt_blk1_8, outbuf, inbuf,
+                               nblocks, iv, tmpbuf, sizeof(tmpbuf) / 16,
+                               &tmp_used);
+      burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth;
 
-          burn_stack_depth = crypt_blk1_8 (ctx->rkey_enc, ivbuf, ivbuf,
-                                           curr_blks);
-
-          for (i = 0; i < curr_blks; i++)
-            {
-              cipher_block_xor (outbuf, inbuf, &ivbuf[i * 16], 16);
-              outbuf += 16;
-              inbuf += 16;
-            }
-
-          nblocks -= curr_blks;
-        }
-
-      wipememory(ivbuf, tmp_used);
+      wipememory(tmpbuf, tmp_used);
     }
 
   if (burn_stack_depth)
@@ -1089,51 +1027,19 @@ _gcry_sm4_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg,
     }
 #endif
 
+  /* Process remaining blocks. */
   if (nblocks)
     {
       crypt_blk1_8_fn_t crypt_blk1_8 = sm4_get_crypt_blk1_8_fn(ctx);
-      const u32 *rk = encrypt ? ctx->rkey_enc : ctx->rkey_dec;
+      u32 *rk = encrypt ? ctx->rkey_enc : ctx->rkey_dec;
       unsigned char tmpbuf[16 * 8];
       unsigned int tmp_used = 16;
+      size_t nburn;
 
-      while (nblocks)
-        {
-          size_t curr_blks = nblocks > 8 ? 8 : nblocks;
-          size_t i;
-
-          if (curr_blks * 16 > tmp_used)
-            tmp_used = curr_blks * 16;
-
-          for (i = 0; i < curr_blks; i++)
-            {
-              const unsigned char *l = ocb_get_l(c, ++blkn);
-
-              /* Checksum_i = Checksum_{i-1} xor P_i */
-              if (encrypt)
-                cipher_block_xor_1(c->u_ctr.ctr, &inbuf[i * 16], 16);
-
-              /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
-              cipher_block_xor_2dst (&tmpbuf[i * 16], c->u_iv.iv, l, 16);
-              cipher_block_xor (&outbuf[i * 16], &inbuf[i * 16],
-                                c->u_iv.iv, 16);
-            }
-
-          /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
-          crypt_blk1_8 (rk, outbuf, outbuf, curr_blks);
-
-          for (i = 0; i < curr_blks; i++)
-            {
-              cipher_block_xor_1 (&outbuf[i * 16], &tmpbuf[i * 16], 16);
-
-              /* Checksum_i = Checksum_{i-1} xor P_i */
-              if (!encrypt)
-                cipher_block_xor_1(c->u_ctr.ctr, &outbuf[i * 16], 16);
-            }
-
-          outbuf += curr_blks * 16;
-          inbuf += curr_blks * 16;
-          nblocks -= curr_blks;
-        }
+      nburn = bulk_ocb_crypt_128 (c, rk, crypt_blk1_8, outbuf, inbuf, nblocks,
+                                  &blkn, encrypt, tmpbuf, sizeof(tmpbuf) / 16,
+                                  &tmp_used);
+      burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth;
 
       wipememory(tmpbuf, tmp_used);
     }
@@ -1153,6 +1059,7 @@ _gcry_sm4_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg, size_t nblocks)
   SM4_context *ctx = (void *)&c->context.c;
   const unsigned char *abuf = abuf_arg;
   u64 blkn = c->u_mode.ocb.aad_nblocks;
+  int burn_stack_depth = 0;

 #ifdef USE_AESNI_AVX2
   if (ctx->use_aesni_avx2)
@@ -1208,47 +1115,26 @@ _gcry_sm4_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg, size_t nblocks)
     }
 #endif
 
+  /* Process remaining blocks. */
   if (nblocks)
     {
       crypt_blk1_8_fn_t crypt_blk1_8 = sm4_get_crypt_blk1_8_fn(ctx);
       unsigned char tmpbuf[16 * 8];
       unsigned int tmp_used = 16;
+      size_t nburn;
 
-      while (nblocks)
-        {
-          size_t curr_blks = nblocks > 8 ? 8 : nblocks;
-          size_t i;
-
-          if (curr_blks * 16 > tmp_used)
-            tmp_used = curr_blks * 16;
-
-          for (i = 0; i < curr_blks; i++)
-            {
-              const unsigned char *l = ocb_get_l(c, ++blkn);
-
-              /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
-              cipher_block_xor_2dst (&tmpbuf[i * 16],
-                                     c->u_mode.ocb.aad_offset, l, 16);
-              cipher_block_xor_1 (&tmpbuf[i * 16], &abuf[i * 16], 16);
-            }
-
-          /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
-          crypt_blk1_8 (ctx->rkey_enc, tmpbuf, tmpbuf, curr_blks);
-
-          for (i = 0; i < curr_blks; i++)
-            {
-              cipher_block_xor_1 (c->u_mode.ocb.aad_sum, &tmpbuf[i * 16], 16);
-            }
-
-          abuf += curr_blks * 16;
-          nblocks -= curr_blks;
-        }
+      nburn = bulk_ocb_auth_128 (c, ctx->rkey_enc, crypt_blk1_8, abuf, nblocks,
+                                 &blkn, tmpbuf, sizeof(tmpbuf) / 16, &tmp_used);
+      burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth;
 
       wipememory(tmpbuf, tmp_used);
     }
 
   c->u_mode.ocb.aad_nblocks = blkn;
 
+  if (burn_stack_depth)
+    _gcry_burn_stack(burn_stack_depth);
+
   return 0;
 }

--
2.34.1


_______________________________________________
Gcrypt-devel mailing list
Gcrypt-devel@lists.gnupg.org
https://lists.gnupg.org/mailman/listinfo/gcrypt-devel