
[v2] target/riscv: Use accelerated helper for AES64KS1I

Message ID: 20230831154118.138727-1-ardb@kernel.org
State: New
Series: [v2] target/riscv: Use accelerated helper for AES64KS1I

Commit Message

Ard Biesheuvel Aug. 31, 2023, 3:41 p.m. UTC
Use the accelerated SubBytes/ShiftRows/AddRoundKey AES helper to
implement the first half of the key schedule derivation. This does not
actually involve shifting rows, so clone the same value into all four
columns of the AES vector to counter that operation.

Cc: Richard Henderson <richard.henderson@linaro.org>
Cc: Philippe Mathieu-Daudé <philmd@linaro.org>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
v2: assign round constant to elements 0 and 1 only

 target/riscv/crypto_helper.c | 17 +++++------------
 1 file changed, 5 insertions(+), 12 deletions(-)
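
The key observation above is easy to check in isolation: ShiftRows only permutes bytes within each row of the AES state, so a state whose four columns hold the same 32-bit word is a fixed point of it, and the SubBytes/ShiftRows/AddRoundKey helper then degenerates to SubWord plus the round-constant XOR on that word. The standalone C sketch below is purely illustrative (it is not QEMU code, and the column-major shift_rows it defines is a local stand-in for the real transform):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* s[col][row]: ShiftRows rotates row r left by r columns. */
static void shift_rows(uint8_t s[4][4])
{
    for (int r = 1; r < 4; r++) {
        uint8_t row[4];
        for (int c = 0; c < 4; c++) {
            row[c] = s[(c + r) % 4][r];
        }
        for (int c = 0; c < 4; c++) {
            s[c][r] = row[c];
        }
    }
}

int main(void)
{
    uint32_t word = 0x01020304;     /* arbitrary key-schedule word */
    uint8_t s[4][4], orig[4][4];

    for (int c = 0; c < 4; c++) {
        memcpy(s[c], &word, 4);     /* replicate into every column */
    }
    memcpy(orig, s, sizeof(s));

    shift_rows(s);
    /* Every row is constant across columns, so the rotation is a no-op. */
    assert(memcmp(s, orig, sizeof(s)) == 0);
    return 0;
}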

Comments

Richard Henderson Aug. 31, 2023, 4:22 p.m. UTC | #1
On 8/31/23 08:41, Ard Biesheuvel wrote:
> Use the accelerated SubBytes/ShiftRows/AddRoundKey AES helper to
> implement the first half of the key schedule derivation. This does not
> actually involve shifting rows, so clone the same value into all four
> columns of the AES vector to counter that operation.
> 
> Cc: Richard Henderson <richard.henderson@linaro.org>
> Cc: Philippe Mathieu-Daudé <philmd@linaro.org>
> Cc: Palmer Dabbelt <palmer@dabbelt.com>
> Cc: Alistair Francis <alistair.francis@wdc.com>
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> ---
> v2: assign round constant to elements 0 and 1 only
> 
>   target/riscv/crypto_helper.c | 17 +++++------------
>   1 file changed, 5 insertions(+), 12 deletions(-)

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>

r~
Philippe Mathieu-Daudé Aug. 31, 2023, 8:02 p.m. UTC | #2
On 31/8/23 17:41, Ard Biesheuvel wrote:
> Use the accelerated SubBytes/ShiftRows/AddRoundKey AES helper to
> implement the first half of the key schedule derivation. This does not
> actually involve shifting rows, so clone the same value into all four
> columns of the AES vector to counter that operation.
> 
> Cc: Richard Henderson <richard.henderson@linaro.org>
> Cc: Philippe Mathieu-Daudé <philmd@linaro.org>
> Cc: Palmer Dabbelt <palmer@dabbelt.com>
> Cc: Alistair Francis <alistair.francis@wdc.com>
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> ---
> v2: assign round constant to elements 0 and 1 only
> 
>   target/riscv/crypto_helper.c | 17 +++++------------
>   1 file changed, 5 insertions(+), 12 deletions(-)

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Alistair Francis Sept. 1, 2023, 2:30 a.m. UTC | #3
On Fri, Sep 1, 2023 at 3:12 AM Ard Biesheuvel <ardb@kernel.org> wrote:
>
> Use the accelerated SubBytes/ShiftRows/AddRoundKey AES helper to
> implement the first half of the key schedule derivation. This does not
> actually involve shifting rows, so clone the same value into all four
> columns of the AES vector to counter that operation.
>
> Cc: Richard Henderson <richard.henderson@linaro.org>
> Cc: Philippe Mathieu-Daudé <philmd@linaro.org>
> Cc: Palmer Dabbelt <palmer@dabbelt.com>
> Cc: Alistair Francis <alistair.francis@wdc.com>
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>

Thanks!

Applied to riscv-to-apply.next

Alistair

> ---
> v2: assign round constant to elements 0 and 1 only
>
>  target/riscv/crypto_helper.c | 17 +++++------------
>  1 file changed, 5 insertions(+), 12 deletions(-)
>
> diff --git a/target/riscv/crypto_helper.c b/target/riscv/crypto_helper.c
> index 4d65945429c6dcc4..bb084e00efe52d1b 100644
> --- a/target/riscv/crypto_helper.c
> +++ b/target/riscv/crypto_helper.c
> @@ -148,24 +148,17 @@ target_ulong HELPER(aes64ks1i)(target_ulong rs1, target_ulong rnum)
>
>      uint8_t enc_rnum = rnum;
>      uint32_t temp = (RS1 >> 32) & 0xFFFFFFFF;
> -    uint8_t rcon_ = 0;
> -    target_ulong result;
> +    AESState t, rc = {};
>
>      if (enc_rnum != 0xA) {
>          temp = ror32(temp, 8); /* Rotate right by 8 */
> -        rcon_ = round_consts[enc_rnum];
> +        rc.w[0] = rc.w[1] = round_consts[enc_rnum];
>      }
>
> -    temp = ((uint32_t)AES_sbox[(temp >> 24) & 0xFF] << 24) |
> -           ((uint32_t)AES_sbox[(temp >> 16) & 0xFF] << 16) |
> -           ((uint32_t)AES_sbox[(temp >> 8) & 0xFF] << 8) |
> -           ((uint32_t)AES_sbox[(temp >> 0) & 0xFF] << 0);
> +    t.w[0] = t.w[1] = t.w[2] = t.w[3] = temp;
> +    aesenc_SB_SR_AK(&t, &t, &rc, false);
>
> -    temp ^= rcon_;
> -
> -    result = ((uint64_t)temp << 32) | temp;
> -
> -    return result;
> +    return t.d[0];
>  }
>
>  target_ulong HELPER(aes64im)(target_ulong rs1)
> --
> 2.39.2
>
>

Patch

diff --git a/target/riscv/crypto_helper.c b/target/riscv/crypto_helper.c
index 4d65945429c6dcc4..bb084e00efe52d1b 100644
--- a/target/riscv/crypto_helper.c
+++ b/target/riscv/crypto_helper.c
@@ -148,24 +148,17 @@ target_ulong HELPER(aes64ks1i)(target_ulong rs1, target_ulong rnum)
 
     uint8_t enc_rnum = rnum;
     uint32_t temp = (RS1 >> 32) & 0xFFFFFFFF;
-    uint8_t rcon_ = 0;
-    target_ulong result;
+    AESState t, rc = {};
 
     if (enc_rnum != 0xA) {
         temp = ror32(temp, 8); /* Rotate right by 8 */
-        rcon_ = round_consts[enc_rnum];
+        rc.w[0] = rc.w[1] = round_consts[enc_rnum];
     }
 
-    temp = ((uint32_t)AES_sbox[(temp >> 24) & 0xFF] << 24) |
-           ((uint32_t)AES_sbox[(temp >> 16) & 0xFF] << 16) |
-           ((uint32_t)AES_sbox[(temp >> 8) & 0xFF] << 8) |
-           ((uint32_t)AES_sbox[(temp >> 0) & 0xFF] << 0);
+    t.w[0] = t.w[1] = t.w[2] = t.w[3] = temp;
+    aesenc_SB_SR_AK(&t, &t, &rc, false);
 
-    temp ^= rcon_;
-
-    result = ((uint64_t)temp << 32) | temp;
-
-    return result;
+    return t.d[0];
 }
 
 target_ulong HELPER(aes64im)(target_ulong rs1)
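
A note on the v2 changelog item ("assign round constant to elements 0 and 1 only"): the helper returns t.d[0], which overlays columns w[0] and w[1] of the 128-bit state in memory whatever the host byte order, so the round constant only needs to reach those two columns. The mock-up below is a hypothetical stand-in for the real AESState union (which lives in QEMU's crypto headers and is not reproduced here) and merely shows that overlap:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical mock-up; field names mirror the patch for readability. */
typedef union {
    uint8_t  b[16];
    uint32_t w[4];
    uint64_t d[2];
} MockAESState;

int main(void)
{
    MockAESState t = { .w = { 0x11111111, 0x22222222,
                              0x33333333, 0x44444444 } };

    /*
     * Prints 0x2222222211111111 on little-endian hosts and
     * 0x1111111122222222 on big-endian ones; either way only
     * w[0] and w[1] contribute to d[0].
     */
    printf("t.d[0] = 0x%016" PRIx64 "\n", t.d[0]);
    return 0;
}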