
[PATCH] blake2-avx512: merge some of the gather loads
* cipher/blake2b-amd64-avx512.S (GATHER_MSG_2, GATHER_MSG_3)
(GATHER_MSG_5, GATHER_MSG_6, GATHER_MSG_8, GATHER_MSG_9): New.
(LOAD_MSG_2, LOAD_MSG_3, LOAD_MSG_5, LOAD_MSG_6, LOAD_MSG_8)
(LOAD_MSG_9): Use GATHER_MSG_<number>.
(_blake2b_avx512_data): Add merged load masks ".Lk[4-7]_mask".
(_gcry_blake2b_transform_amd64_avx512): Load merged load masks
to %k[4-7] and clear registers on exit.
* cipher/blake2s-amd64-avx512.S (VPINSRD_KMASK, GATHER_MSG_2)
(GATHER_MSG_3, GATHER_MSG_5, GATHER_MSG_6, GATHER_MSG_8)
(GATHER_MSG_9): New.
(LOAD_MSG_2, LOAD_MSG_3, LOAD_MSG_5, LOAD_MSG_6, LOAD_MSG_8)
(LOAD_MSG_9): Use GATHER_MSG_<number>.
(_blake2s_avx512_data): Add merged load masks ".Lk[4-7]_mask".
(_gcry_blake2s_transform_amd64_avx512): Load merged load masks
to %k[4-7] and clear registers on exit.
--

Merged loads reduce the number of memory loads and instructions in
the blake2-avx512 implementations a bit. However, since GATHER_MSG
is not a bottleneck on Intel tigerlake or AMD Zen4, this does not
give an easily measurable performance difference; bench-slope results
remain the same as before.
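
For readers unfamiliar with the trick, below is a minimal, hypothetical
standalone C sketch (not part of the patch; it uses AVX-512VL intrinsics
instead of the assembly macros above) of what a "merged load" does: when
two message words that end up in vector lanes 0 and 2 happen to sit two
qwords apart in memory, one zero-masking load with mask (1 << 0) + (1 << 2)
replaces a scalar load plus a masked insert. The msg[] indices below are
chosen to match the blake2b LOAD_MSG_3 case for m3 (s8 = 2, s12 = 4), which
the patch handles with the new ".Lk4_mask" constant in %k4.

/*
 * merged_load.c: hypothetical illustration of the merged-load idea,
 * written with AVX-512VL intrinsics rather than the assembly macros
 * used in the patch.  Build with e.g.:
 *   gcc -O2 -mavx512f -mavx512vl merged_load.c -o merged_load
 */
#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Stand-in for the 16-qword message block (RINBLKS in the patch). */
    uint64_t msg[16];
    for (int i = 0; i < 16; i++)
        msg[i] = 0x0101010101010101ULL * (uint64_t)i;

    /* Unmerged pattern, mirroring GATHER_MSG + VPINSRQ_KMASK: a scalar
     * load for lane 0, then a merging masked load for lane 2 (reading
     * from &msg[4 - 2] so that lane 2 picks up msg[4]). */
    __m256i a = _mm256_set_epi64x(0, 0, 0, (long long)msg[2]);
    a = _mm256_mask_loadu_epi64(a, (__mmask8)(1u << 2), &msg[4 - 2]);

    /* Merged pattern, mirroring the ".Lk4_mask" case: msg[2] and msg[4]
     * are two qwords apart, so one zero-masking load with mask
     * (1 << 0) + (1 << 2) fills lanes 0 and 2 in a single instruction. */
    __m256i b = _mm256_maskz_loadu_epi64((__mmask8)((1u << 0) | (1u << 2)),
                                         &msg[2]);

    uint64_t ra[4], rb[4];
    _mm256_storeu_si256((__m256i *)ra, a);
    _mm256_storeu_si256((__m256i *)rb, b);
    printf("lane0: %016llx == %016llx\nlane2: %016llx == %016llx\n",
           (unsigned long long)ra[0], (unsigned long long)rb[0],
           (unsigned long long)ra[2], (unsigned long long)rb[2]);
    return 0;
}

In the patch itself the same idea is applied directly with masked
vmovdqu64/vmovdqu32 loads and vinserti64x2, using the %k4..%k7 mask
registers loaded from the new ".Lk[4-7]_mask" constants.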

Benchmark on AMD Ryzen 9 7900X (zen4):

Before:
| nanosecs/byte mebibytes/sec cycles/byte auto Mhz
BLAKE2S_256 | 1.14 ns/B 837.6 MiB/s 5.35 c/B 4700
BLAKE2B_512 | 0.772 ns/B 1235 MiB/s 3.63 c/B 4700

After:
| nanosecs/byte mebibytes/sec cycles/byte auto Mhz
BLAKE2S_256 | 1.14 ns/B 837.6 MiB/s 5.35 c/B 4700
BLAKE2B_512 | 0.772 ns/B 1235 MiB/s 3.63 c/B 4700

Benchmark on Intel Core i3-1115G4 (tigerlake):

Before:
| nanosecs/byte mebibytes/sec cycles/byte auto Mhz
BLAKE2S_256 | 1.02 ns/B 934.2 MiB/s 4.18 c/B 4090
BLAKE2B_512 | 0.705 ns/B 1353 MiB/s 2.88 c/B 4089

After:
| nanosecs/byte mebibytes/sec cycles/byte auto Mhz
BLAKE2S_256 | 1.02 ns/B 933.5 MiB/s 4.18 c/B 4089
BLAKE2B_512 | 0.705 ns/B 1353 MiB/s 2.88 c/B 4089

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
---
cipher/blake2b-amd64-avx512.S | 153 +++++++++++++++++++++++++++++---
cipher/blake2s-amd64-avx512.S | 158 +++++++++++++++++++++++++++++++---
2 files changed, 284 insertions(+), 27 deletions(-)

diff --git a/cipher/blake2b-amd64-avx512.S b/cipher/blake2b-amd64-avx512.S
index 3a04818c..b030849e 100644
--- a/cipher/blake2b-amd64-avx512.S
+++ b/cipher/blake2b-amd64-avx512.S
@@ -73,8 +73,6 @@
blake2b/AVX2
**********************************************************************/

-/* Load one qword value at memory location MEM to specific element in
- * target register VREG. Note, KPOS needs to contain value "(1 << QPOS)". */
#define VPINSRQ_KMASK(kpos, qpos, mem, vreg) \
vmovdqu64 -((qpos) * 8) + mem, vreg {kpos}

@@ -98,6 +96,117 @@
VPINSRQ_KMASK(%k3, 3, (s14)*8(RINBLKS), m3); \
VPINSRQ_KMASK(%k3, 3, (s15)*8(RINBLKS), m4);

+#define GATHER_MSG_2(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
+ s0, s1, s2, s3, s4, s5, s6, s7, s8, \
+ s9, s10, s11, s12, s13, s14, s15) \
+ vmovdqu (s0)*8(RINBLKS), m1x; /* merged load */ \
+ vmovq (s1)*8(RINBLKS), m2x; \
+ vmovq (s8)*8(RINBLKS), m3x; \
+ vmovq (s9)*8(RINBLKS), m4x; \
+ VPINSRQ_KMASK(%k1, 1, (s3)*8(RINBLKS), m2); \
+ VPINSRQ_KMASK(%k1, 1, (s10)*8(RINBLKS), m3); \
+ VPINSRQ_KMASK(%k1, 1, (s11)*8(RINBLKS), m4); \
+ VPINSRQ_KMASK(%k2, 2, (s4)*8(RINBLKS), m1); \
+ VPINSRQ_KMASK(%k2, 2, (s5)*8(RINBLKS), m2); \
+ VPINSRQ_KMASK(%k2, 2, (s12)*8(RINBLKS), m3); \
+ VPINSRQ_KMASK(%k2, 2, (s13)*8(RINBLKS), m4); \
+ VPINSRQ_KMASK(%k3, 3, (s6)*8(RINBLKS), m1); \
+ VPINSRQ_KMASK(%k3, 3, (s7)*8(RINBLKS), m2); \
+ VPINSRQ_KMASK(%k3, 3, (s14)*8(RINBLKS), m3); \
+ VPINSRQ_KMASK(%k3, 3, (s15)*8(RINBLKS), m4);
+
+#define GATHER_MSG_3(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
+ s0, s1, s2, s3, s4, s5, s6, s7, s8, \
+ s9, s10, s11, s12, s13, s14, s15) \
+ vmovq (s0)*8(RINBLKS), m1x; \
+ vmovq (s1)*8(RINBLKS), m2x; \
+ vmovdqu64 (s8)*8(RINBLKS), m3 {%k4}{z}; /* merged load */ \
+ vmovq (s9)*8(RINBLKS), m4x; \
+ VPINSRQ_KMASK(%k1, 1, (s2)*8(RINBLKS), m1); \
+ VPINSRQ_KMASK(%k1, 1, (s3)*8(RINBLKS), m2); \
+ VPINSRQ_KMASK(%k1, 1, (s10)*8(RINBLKS), m3); \
+ VPINSRQ_KMASK(%k1, 1, (s11)*8(RINBLKS), m4); \
+ VPINSRQ_KMASK(%k2, 2, (s4)*8(RINBLKS), m1); \
+ VPINSRQ_KMASK(%k2, 2, (s5)*8(RINBLKS), m2); \
+ VPINSRQ_KMASK(%k2, 2, (s13)*8(RINBLKS), m4); \
+ VPINSRQ_KMASK(%k3, 3, (s6)*8(RINBLKS), m1); \
+ VPINSRQ_KMASK(%k3, 3, (s7)*8(RINBLKS), m2); \
+ VPINSRQ_KMASK(%k3, 3, (s14)*8(RINBLKS), m3); \
+ VPINSRQ_KMASK(%k3, 3, (s15)*8(RINBLKS), m4);
+
+#define GATHER_MSG_5(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
+ s0, s1, s2, s3, s4, s5, s6, s7, s8, \
+ s9, s10, s11, s12, s13, s14, s15) \
+ vmovq (s0)*8(RINBLKS), m1x; \
+ vmovq (s1)*8(RINBLKS), m2x; \
+ vmovq (s8)*8(RINBLKS), m3x; \
+ vmovq (s9)*8(RINBLKS), m4x; \
+ VPINSRQ_KMASK(%k5, 1, (s2)*8(RINBLKS), m1); /* merged load */ \
+ VPINSRQ_KMASK(%k6, 1, (s3)*8(RINBLKS), m2); /* merged load */ \
+ VPINSRQ_KMASK(%k1, 1, (s10)*8(RINBLKS), m3); \
+ VPINSRQ_KMASK(%k1, 1, (s11)*8(RINBLKS), m4); \
+ VPINSRQ_KMASK(%k2, 2, (s4)*8(RINBLKS), m1); \
+ VPINSRQ_KMASK(%k2, 2, (s12)*8(RINBLKS), m3); \
+ VPINSRQ_KMASK(%k2, 2, (s13)*8(RINBLKS), m4); \
+ VPINSRQ_KMASK(%k3, 3, (s7)*8(RINBLKS), m2); \
+ VPINSRQ_KMASK(%k3, 3, (s14)*8(RINBLKS), m3); \
+ VPINSRQ_KMASK(%k3, 3, (s15)*8(RINBLKS), m4);
+
+#define GATHER_MSG_6(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
+ s0, s1, s2, s3, s4, s5, s6, s7, s8, \
+ s9, s10, s11, s12, s13, s14, s15) \
+ vmovdqu64 (s0)*8(RINBLKS), m1 {%k4}{z}; /* merged load */; \
+ vmovq (s1)*8(RINBLKS), m2x; \
+ vmovq (s8)*8(RINBLKS), m3x; \
+ vmovq (s9)*8(RINBLKS), m4x; \
+ VPINSRQ_KMASK(%k1, 1, (s2)*8(RINBLKS), m1); \
+ VPINSRQ_KMASK(%k1, 1, (s3)*8(RINBLKS), m2); \
+ VPINSRQ_KMASK(%k5, 1, (s10)*8(RINBLKS), m3); /* merged load */ \
+ VPINSRQ_KMASK(%k1, 1, (s11)*8(RINBLKS), m4); \
+ VPINSRQ_KMASK(%k2, 2, (s5)*8(RINBLKS), m2); \
+ VPINSRQ_KMASK(%k2, 2, (s12)*8(RINBLKS), m3); \
+ VPINSRQ_KMASK(%k2, 2, (s13)*8(RINBLKS), m4); \
+ VPINSRQ_KMASK(%k3, 3, (s6)*8(RINBLKS), m1); \
+ VPINSRQ_KMASK(%k3, 3, (s7)*8(RINBLKS), m2); \
+ VPINSRQ_KMASK(%k3, 3, (s15)*8(RINBLKS), m4);
+
+#define GATHER_MSG_8(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
+ s0, s1, s2, s3, s4, s5, s6, s7, s8, \
+ s9, s10, s11, s12, s13, s14, s15) \
+ vmovq (s0)*8(RINBLKS), m1x; \
+ vmovq (s1)*8(RINBLKS), m2x; \
+ vmovdqu (s8)*8(RINBLKS), m3x; /* merged load */ \
+ vmovq (s9)*8(RINBLKS), m4x; \
+ VPINSRQ_KMASK(%k1, 1, (s2)*8(RINBLKS), m1); \
+ VPINSRQ_KMASK(%k1, 1, (s3)*8(RINBLKS), m2); \
+ VPINSRQ_KMASK(%k1, 1, (s11)*8(RINBLKS), m4); \
+ VPINSRQ_KMASK(%k2, 2, (s4)*8(RINBLKS), m1); \
+ VPINSRQ_KMASK(%k2, 2, (s5)*8(RINBLKS), m2); \
+ VPINSRQ_KMASK(%k2, 2, (s12)*8(RINBLKS), m3); \
+ vinserti64x2 $1, (s13)*8(RINBLKS), m4, m4; /* merged load */ \
+ VPINSRQ_KMASK(%k3, 3, (s6)*8(RINBLKS), m1); \
+ VPINSRQ_KMASK(%k3, 3, (s7)*8(RINBLKS), m2); \
+ VPINSRQ_KMASK(%k3, 3, (s14)*8(RINBLKS), m3);
+
+#define GATHER_MSG_9(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
+ s0, s1, s2, s3, s4, s5, s6, s7, s8, \
+ s9, s10, s11, s12, s13, s14, s15) \
+ vmovq (s0)*8(RINBLKS), m1x; \
+ vmovdqu64 (s1)*8(RINBLKS), m2 {%k7}{z}; /* merged load */; \
+ vmovq (s8)*8(RINBLKS), m3x; \
+ vmovq (s9)*8(RINBLKS), m4x; \
+ VPINSRQ_KMASK(%k1, 1, (s2)*8(RINBLKS), m1); \
+ VPINSRQ_KMASK(%k1, 1, (s3)*8(RINBLKS), m2); \
+ VPINSRQ_KMASK(%k1, 1, (s10)*8(RINBLKS), m3); \
+ VPINSRQ_KMASK(%k1, 1, (s11)*8(RINBLKS), m4); \
+ VPINSRQ_KMASK(%k2, 2, (s4)*8(RINBLKS), m1); \
+ VPINSRQ_KMASK(%k2, 2, (s5)*8(RINBLKS), m2); \
+ VPINSRQ_KMASK(%k2, 2, (s12)*8(RINBLKS), m3); \
+ VPINSRQ_KMASK(%k2, 2, (s13)*8(RINBLKS), m4); \
+ VPINSRQ_KMASK(%k3, 3, (s6)*8(RINBLKS), m1); \
+ VPINSRQ_KMASK(%k3, 3, (s14)*8(RINBLKS), m3); \
+ VPINSRQ_KMASK(%k3, 3, (s15)*8(RINBLKS), m4);
+
#define LOAD_MSG_0(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \
GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)
@@ -105,29 +214,29 @@
GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3)
#define LOAD_MSG_2(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \
- GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
- 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4)
+ GATHER_MSG_2(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
+ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4)
#define LOAD_MSG_3(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \
- GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
- 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8)
+ GATHER_MSG_3(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
+ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8)
#define LOAD_MSG_4(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \
GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13)
#define LOAD_MSG_5(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \
- GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
- 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9)
+ GATHER_MSG_5(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
+ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9)
#define LOAD_MSG_6(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \
- GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
- 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11)
+ GATHER_MSG_6(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
+ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11)
#define LOAD_MSG_7(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \
GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10)
#define LOAD_MSG_8(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \
- GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
- 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5)
+ GATHER_MSG_8(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
+ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5)
#define LOAD_MSG_9(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \
- GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
- 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0)
+ GATHER_MSG_9(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \
+ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0)
#define LOAD_MSG_10(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \
LOAD_MSG_0(m1, m2, m3, m4, m1x, m2x, m3x, m4x)
#define LOAD_MSG_11(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \
@@ -194,6 +303,14 @@ _blake2b_avx512_data:
.byte 2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9
.Lk1_mask:
.byte (1 << 1)
+.Lk4_mask:
+ .byte (1 << 0) + (1 << 2)
+.Lk5_mask:
+ .byte (1 << 1) + (1 << 3)
+.Lk6_mask:
+ .byte (1 << 1) + (1 << 2)
+.Lk7_mask:
+ .byte (1 << 0) + (1 << 3)

.text

@@ -214,6 +331,10 @@ _gcry_blake2b_transform_amd64_avx512:
kmovb .Lk1_mask rRIP, %k1;
kshiftlb $1, %k1, %k2;
kshiftlb $2, %k1, %k3;
+ kmovb .Lk4_mask rRIP, %k4;
+ kmovb .Lk5_mask rRIP, %k5;
+ kmovb .Lk6_mask rRIP, %k6;
+ kmovb .Lk7_mask rRIP, %k7;

addq $128, (STATE_T + 0)(RSTATE);
adcq $0, (STATE_T + 8)(RSTATE);
@@ -293,6 +414,10 @@ _gcry_blake2b_transform_amd64_avx512:
kxord %k1, %k1, %k1;
kxord %k2, %k2, %k2;
kxord %k3, %k3, %k3;
+ kxord %k4, %k4, %k4;
+ kxord %k5, %k5, %k5;
+ kxord %k6, %k6, %k6;
+ kxord %k7, %k7, %k7;

vzeroall;
ret_spec_stop;
diff --git a/cipher/blake2s-amd64-avx512.S b/cipher/blake2s-amd64-avx512.S
index e2da2a18..543944bf 100644
--- a/cipher/blake2s-amd64-avx512.S
+++ b/cipher/blake2s-amd64-avx512.S
@@ -64,7 +64,9 @@
blake2s/AVX
**********************************************************************/

-/* On Intel tigerlake, vmovd+vpinsrd approach is faster than vpgatherdd. */
+#define VPINSRD_KMASK(kpos, dpos, mem, vreg) \
+ vmovdqu32 -((dpos) * 4) + mem, vreg {kpos}
+
#define GATHER_MSG(m1, m2, m3, m4, \
s0, s1, s2, s3, s4, s5, s6, s7, s8, \
s9, s10, s11, s12, s13, s14, s15) \
@@ -85,6 +87,118 @@
vpinsrd $3, (s14)*4(RINBLKS), m3, m3; \
vpinsrd $3, (s15)*4(RINBLKS), m4, m4;

+#define GATHER_MSG_2(m1, m2, m3, m4, \
+ s0, s1, s2, s3, s4, s5, s6, s7, s8, \
+ s9, s10, s11, s12, s13, s14, s15) \
+ vmovq (s0)*4(RINBLKS), m1; /* merged load */ \
+ vmovd (s1)*4(RINBLKS), m2; \
+ vmovd (s8)*4(RINBLKS), m3; \
+ vmovd (s9)*4(RINBLKS), m4; \
+ vpinsrd $1, (s3)*4(RINBLKS), m2, m2; \
+ vpinsrd $1, (s10)*4(RINBLKS), m3, m3; \
+ vpinsrd $1, (s11)*4(RINBLKS), m4, m4; \
+ vpinsrd $2, (s4)*4(RINBLKS), m1, m1; \
+ vpinsrd $2, (s5)*4(RINBLKS), m2, m2; \
+ vpinsrd $2, (s12)*4(RINBLKS), m3, m3; \
+ vpinsrd $2, (s13)*4(RINBLKS), m4, m4; \
+ vpinsrd $3, (s6)*4(RINBLKS), m1, m1; \
+ vpinsrd $3, (s7)*4(RINBLKS), m2, m2; \
+ vpinsrd $3, (s14)*4(RINBLKS), m3, m3; \
+ vpinsrd $3, (s15)*4(RINBLKS), m4, m4;
+
+#define GATHER_MSG_3(m1, m2, m3, m4, \
+ s0, s1, s2, s3, s4, s5, s6, s7, s8, \
+ s9, s10, s11, s12, s13, s14, s15) \
+ vmovd (s0)*4(RINBLKS), m1; \
+ vmovd (s1)*4(RINBLKS), m2; \
+ vmovdqu32 (s8)*4(RINBLKS), m3 {%k4}{z}; /* merged load */ \
+ vmovd (s9)*4(RINBLKS), m4; \
+ vpinsrd $1, (s2)*4(RINBLKS), m1, m1; \
+ vpinsrd $1, (s3)*4(RINBLKS), m2, m2; \
+ vpinsrd $1, (s10)*4(RINBLKS), m3, m3; \
+ vpinsrd $1, (s11)*4(RINBLKS), m4, m4; \
+ vpinsrd $2, (s4)*4(RINBLKS), m1, m1; \
+ vpinsrd $2, (s5)*4(RINBLKS), m2, m2; \
+ vpinsrd $2, (s12)*4(RINBLKS), m3, m3; \
+ vpinsrd $2, (s13)*4(RINBLKS), m4, m4; \
+ vpinsrd $3, (s6)*4(RINBLKS), m1, m1; \
+ vpinsrd $3, (s7)*4(RINBLKS), m2, m2; \
+ vpinsrd $3, (s14)*4(RINBLKS), m3, m3; \
+ vpinsrd $3, (s15)*4(RINBLKS), m4, m4;
+
+#define GATHER_MSG_5(m1, m2, m3, m4, \
+ s0, s1, s2, s3, s4, s5, s6, s7, s8, \
+ s9, s10, s11, s12, s13, s14, s15) \
+ vmovd (s0)*4(RINBLKS), m1; \
+ vmovd (s1)*4(RINBLKS), m2; \
+ vmovd (s8)*4(RINBLKS), m3; \
+ vmovd (s9)*4(RINBLKS), m4; \
+ VPINSRD_KMASK(%k5, 1, (s2)*4(RINBLKS), m1); /* merged load */ \
+ VPINSRD_KMASK(%k6, 1, (s3)*4(RINBLKS), m2); /* merged load */ \
+ vpinsrd $1, (s10)*4(RINBLKS), m3, m3; \
+ vpinsrd $1, (s11)*4(RINBLKS), m4, m4; \
+ vpinsrd $2, (s4)*4(RINBLKS), m1, m1; \
+ vpinsrd $2, (s12)*4(RINBLKS), m3, m3; \
+ vpinsrd $2, (s13)*4(RINBLKS), m4, m4; \
+ vpinsrd $3, (s7)*4(RINBLKS), m2, m2; \
+ vpinsrd $3, (s14)*4(RINBLKS), m3, m3; \
+ vpinsrd $3, (s15)*4(RINBLKS), m4, m4;
+
+#define GATHER_MSG_6(m1, m2, m3, m4, \
+ s0, s1, s2, s3, s4, s5, s6, s7, s8, \
+ s9, s10, s11, s12, s13, s14, s15) \
+ vmovdqu32 (s0)*4(RINBLKS), m1 {%k4}{z}; /* merged load */; \
+ vmovd (s1)*4(RINBLKS), m2; \
+ vmovd (s8)*4(RINBLKS), m3; \
+ vmovd (s9)*4(RINBLKS), m4; \
+ vpinsrd $1, (s2)*4(RINBLKS), m1, m1; \
+ vpinsrd $1, (s3)*4(RINBLKS), m2, m2; \
+ VPINSRD_KMASK(%k5, 1, (s10)*4(RINBLKS), m3); /* merged load */ \
+ vpinsrd $1, (s11)*4(RINBLKS), m4, m4; \
+ vpinsrd $2, (s5)*4(RINBLKS), m2, m2; \
+ vpinsrd $2, (s12)*4(RINBLKS), m3, m3; \
+ vpinsrd $2, (s13)*4(RINBLKS), m4, m4; \
+ vpinsrd $3, (s6)*4(RINBLKS), m1, m1; \
+ vpinsrd $3, (s7)*4(RINBLKS), m2, m2; \
+ vpinsrd $3, (s15)*4(RINBLKS), m4, m4;
+
+#define GATHER_MSG_8(m1, m2, m3, m4, \
+ s0, s1, s2, s3, s4, s5, s6, s7, s8, \
+ s9, s10, s11, s12, s13, s14, s15) \
+ vmovd (s0)*4(RINBLKS), m1; \
+ vmovd (s1)*4(RINBLKS), m2; \
+ vmovq (s8)*4(RINBLKS), m3; \
+ vmovd (s9)*4(RINBLKS), m4; \
+ vpinsrd $1, (s2)*4(RINBLKS), m1, m1; \
+ vpinsrd $1, (s3)*4(RINBLKS), m2, m2; \
+ vpinsrd $1, (s11)*4(RINBLKS), m4, m4; \
+ vpinsrd $2, (s4)*4(RINBLKS), m1, m1; \
+ vpinsrd $2, (s5)*4(RINBLKS), m2, m2; \
+ vpinsrd $2, (s12)*4(RINBLKS), m3, m3; \
+ vpinsrq $1, (s13)*4(RINBLKS), m4, m4; \
+ vpinsrd $3, (s6)*4(RINBLKS), m1, m1; \
+ vpinsrd $3, (s7)*4(RINBLKS), m2, m2; \
+ vpinsrd $3, (s14)*4(RINBLKS), m3, m3;
+
+#define GATHER_MSG_9(m1, m2, m3, m4, \
+ s0, s1, s2, s3, s4, s5, s6, s7, s8, \
+ s9, s10, s11, s12, s13, s14, s15) \
+ vmovd (s0)*4(RINBLKS), m1; \
+ vmovdqu32 (s1)*4(RINBLKS), m2 {%k7}{z}; /* merged load */; \
+ vmovd (s8)*4(RINBLKS), m3; \
+ vmovd (s9)*4(RINBLKS), m4; \
+ vpinsrd $1, (s2)*4(RINBLKS), m1, m1; \
+ vpinsrd $1, (s3)*4(RINBLKS), m2, m2; \
+ vpinsrd $1, (s10)*4(RINBLKS), m3, m3; \
+ vpinsrd $1, (s11)*4(RINBLKS), m4, m4; \
+ vpinsrd $2, (s4)*4(RINBLKS), m1, m1; \
+ vpinsrd $2, (s5)*4(RINBLKS), m2, m2; \
+ vpinsrd $2, (s12)*4(RINBLKS), m3, m3; \
+ vpinsrd $2, (s13)*4(RINBLKS), m4, m4; \
+ vpinsrd $3, (s6)*4(RINBLKS), m1, m1; \
+ vpinsrd $3, (s14)*4(RINBLKS), m3, m3; \
+ vpinsrd $3, (s15)*4(RINBLKS), m4, m4;
+
#define LOAD_MSG_0(m1, m2, m3, m4) \
GATHER_MSG(m1, m2, m3, m4, \
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)
@@ -92,29 +206,29 @@
GATHER_MSG(m1, m2, m3, m4, \
14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3)
#define LOAD_MSG_2(m1, m2, m3, m4) \
- GATHER_MSG(m1, m2, m3, m4, \
- 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4)
+ GATHER_MSG_2(m1, m2, m3, m4, \
+ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4)
#define LOAD_MSG_3(m1, m2, m3, m4) \
- GATHER_MSG(m1, m2, m3, m4, \
- 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8)
+ GATHER_MSG_3(m1, m2, m3, m4, \
+ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8)
#define LOAD_MSG_4(m1, m2, m3, m4) \
GATHER_MSG(m1, m2, m3, m4, \
9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13)
#define LOAD_MSG_5(m1, m2, m3, m4) \
- GATHER_MSG(m1, m2, m3, m4, \
- 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9)
+ GATHER_MSG_5(m1, m2, m3, m4, \
+ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9)
#define LOAD_MSG_6(m1, m2, m3, m4) \
- GATHER_MSG(m1, m2, m3, m4, \
- 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11)
+ GATHER_MSG_6(m1, m2, m3, m4, \
+ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11)
#define LOAD_MSG_7(m1, m2, m3, m4) \
GATHER_MSG(m1, m2, m3, m4, \
13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10)
#define LOAD_MSG_8(m1, m2, m3, m4) \
- GATHER_MSG(m1, m2, m3, m4, \
- 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5)
+ GATHER_MSG_8(m1, m2, m3, m4, \
+ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5)
#define LOAD_MSG_9(m1, m2, m3, m4) \
- GATHER_MSG(m1, m2, m3, m4, \
- 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0)
+ GATHER_MSG_9(m1, m2, m3, m4, \
+ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0)

#define LOAD_MSG(r, m1, m2, m3, m4) LOAD_MSG_##r(m1, m2, m3, m4)

@@ -170,6 +284,14 @@ _blake2s_avx512_data:
.Liv:
.long 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A
.long 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
+.Lk4_mask:
+ .byte (1 << 0) + (1 << 2)
+.Lk5_mask:
+ .byte (1 << 1) + (1 << 3)
+.Lk6_mask:
+ .byte (1 << 1) + (1 << 2)
+.Lk7_mask:
+ .byte (1 << 0) + (1 << 3)

.text

@@ -187,6 +309,11 @@ _gcry_blake2s_transform_amd64_avx512:

spec_stop_avx512;

+ kmovb .Lk4_mask rRIP, %k4;
+ kmovb .Lk5_mask rRIP, %k5;
+ kmovb .Lk6_mask rRIP, %k6;
+ kmovb .Lk7_mask rRIP, %k7;
+
addq $64, (STATE_T + 0)(RSTATE);

vmovdqa .Liv+(0 * 4) rRIP, ROW3;
@@ -255,6 +382,11 @@ _gcry_blake2s_transform_amd64_avx512:
vmovdqu ROW2, (STATE_H + 4 * 4)(RSTATE);

xorl %eax, %eax;
+ kxord %k4, %k4, %k4;
+ kxord %k5, %k5, %k5;
+ kxord %k6, %k6, %k6;
+ kxord %k7, %k7, %k7;
+
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
--
2.39.2

