[PATCH v4 2/8] crypto: polyval - Add POLYVAL support

Eric Biggers ebiggers at kernel.org
Mon Apr 18 12:25:13 PDT 2022


A couple more nits:

On Tue, Apr 12, 2022 at 05:28:10PM +0000, Nathan Huckleberry wrote:

> +static int polyval_init(struct shash_desc *desc)
[...]
> +static int polyval_setkey(struct crypto_shash *tfm,

As I mentioned on the x86 patch, setkey() is the first step, before init().
People sometimes mix this up, e.g. see https://git.kernel.org/linus/f3aefb6a7066e24b.
Putting the definitions in their natural order might be helpful:

	1. polyval_setkey()
	2. polyval_init()
	3. polyval_update()
	4. polyval_final()
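
(For anyone reading along, the ordering matters because setkey() operates on
the tfm and has to happen once before any per-request init()/update()/final()
sequence.  A minimal caller-side sketch, assuming the standard crypto_shash
API and the "polyval" cra_name this patch registers; polyval_digest_example()
is a made-up name purely for illustration:

#include <crypto/hash.h>
#include <linux/err.h>

static int polyval_digest_example(const u8 key[16], const u8 *data,
				  unsigned int len, u8 out[16])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("polyval", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Step 1: setkey() configures the tfm, once, before any requests. */
	err = crypto_shash_setkey(tfm, key, 16);
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		/* Steps 2-4: init()/update()/final() run per request. */
		err = crypto_shash_init(desc) ?:
		      crypto_shash_update(desc, data, len) ?:
		      crypto_shash_final(desc, out);
	}
	crypto_free_shash(tfm);
	return err;
}

Keeping the definitions in crypto/polyval-generic.c in that same order just
makes the file read top-to-bottom the way the algorithm gets used.)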

> +static void reverse_block(u8 block[POLYVAL_BLOCK_SIZE])
> +{
> +	u64 *p1 = (u64 *)block;
> +	u64 *p2 = (u64 *)&block[8];
> +	u64 a = get_unaligned(p1);
> +	u64 b = get_unaligned(p2);
> +
> +	put_unaligned(swab64(a), p2);
> +	put_unaligned(swab64(b), p1);
> +}

This is always paired with a memcpy() of the block, so consider making this
helper function handle the copy too.  E.g.

diff --git a/crypto/polyval-generic.c b/crypto/polyval-generic.c
index 1399af125b937..b50db5dd51fd1 100644
--- a/crypto/polyval-generic.c
+++ b/crypto/polyval-generic.c
@@ -75,15 +75,14 @@ static int polyval_init(struct shash_desc *desc)
 	return 0;
 }
 
-static void reverse_block(u8 block[POLYVAL_BLOCK_SIZE])
+static void copy_and_reverse(u8 dst[POLYVAL_BLOCK_SIZE],
+			     const u8 src[POLYVAL_BLOCK_SIZE])
 {
-	u64 *p1 = (u64 *)block;
-	u64 *p2 = (u64 *)&block[8];
-	u64 a = get_unaligned(p1);
-	u64 b = get_unaligned(p2);
+	u64 a = get_unaligned((const u64 *)&src[0]);
+	u64 b = get_unaligned((const u64 *)&src[8]);
 
-	put_unaligned(swab64(a), p2);
-	put_unaligned(swab64(b), p1);
+	put_unaligned(swab64(a), (u64 *)&dst[8]);
+	put_unaligned(swab64(b), (u64 *)&dst[0]);
 }
 
 static int polyval_setkey(struct crypto_shash *tfm,
@@ -98,10 +97,7 @@ static int polyval_setkey(struct crypto_shash *tfm,
 	gf128mul_free_4k(ctx->gf128);
 
 	BUILD_BUG_ON(sizeof(k) != POLYVAL_BLOCK_SIZE);
-	// avoid violating alignment rules
-	memcpy(&k, key, POLYVAL_BLOCK_SIZE);
-
-	reverse_block((u8 *)&k);
+	copy_and_reverse((u8 *)&k, key);
 	gf128mul_x_lle(&k, &k);
 
 	ctx->gf128 = gf128mul_init_4k_lle(&k);
@@ -137,8 +133,7 @@ static int polyval_update(struct shash_desc *desc,
 	}
 
 	while (srclen >= POLYVAL_BLOCK_SIZE) {
-		memcpy(tmp, src, POLYVAL_BLOCK_SIZE);
-		reverse_block(tmp);
+		copy_and_reverse(tmp, src);
 		crypto_xor(dctx->buffer, tmp, POLYVAL_BLOCK_SIZE);
 		gf128mul_4k_lle(&dctx->buffer128, ctx->gf128);
 		src += POLYVAL_BLOCK_SIZE;
@@ -162,11 +157,7 @@ static int polyval_final(struct shash_desc *desc, u8 *dst)
 
 	if (dctx->bytes)
 		gf128mul_4k_lle(&dctx->buffer128, ctx->gf128);
-	dctx->bytes = 0;
-
-	reverse_block(dctx->buffer);
-	memcpy(dst, dctx->buffer, POLYVAL_BLOCK_SIZE);
-
+	copy_and_reverse(dst, dctx->buffer);
 	return 0;
 }
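
For what it's worth (nothing that needs to go into the patch): since
get_unaligned()/put_unaligned() do native-endian accesses and swab64() flips
each half before it is stored into the opposite half, copy_and_reverse() ends
up being a plain byte-reversal of the 16-byte block on both LE and BE hosts,
i.e. dst[i] = src[15 - i].  A bytewise equivalent, purely for illustration:

static void copy_and_reverse_bytewise(u8 dst[POLYVAL_BLOCK_SIZE],
				      const u8 src[POLYVAL_BLOCK_SIZE])
{
	int i;

	/* Same result as the u64-based helper above, on LE and BE hosts. */
	for (i = 0; i < POLYVAL_BLOCK_SIZE; i++)
		dst[i] = src[POLYVAL_BLOCK_SIZE - 1 - i];
}

The u64 version in the diff is of course the one to use; this is just to make
the intended semantics explicit, namely that POLYVAL blocks are byte-reversed
relative to what the gf128mul_*_lle() helpers expect, which is why the patch
reverses on both input and output.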


