@@ -5,7 +5,7 @@
* s390 implementation of the AES Cipher Algorithm with protected keys.
*
* s390 Version:
- * Copyright IBM Corp. 2017,2019
+ * Copyright IBM Corp. 2017,2020
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
* Harald Freudenberger <freude@de.ibm.com>
*/
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
+#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
@@ -35,7 +36,7 @@
#define PAES_MAX_KEYSIZE 256
static u8 *ctrblk;
-static DEFINE_SPINLOCK(ctrblk_lock);
+static DEFINE_MUTEX(ctrblk_lock);
static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
@@ -81,16 +82,18 @@ static inline void _free_kb_keybuf(struct key_blob *kb)
struct s390_paes_ctx {
struct key_blob kb;
struct pkey_protkey pk;
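+ /* pk_lock protects the pk protected key against concurrent update */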
+ spinlock_t pk_lock;
unsigned long fc;
};
struct s390_pxts_ctx {
struct key_blob kb[2];
struct pkey_protkey pk[2];
+ spinlock_t pk_lock;
unsigned long fc;
};
-static inline int __paes_convert_key(struct key_blob *kb,
+static inline int __paes_keyblob2pkey(struct key_blob *kb,
struct pkey_protkey *pk)
{
int i, ret;
@@ -105,22 +108,18 @@ static inline int __paes_convert_key(struct key_blob *kb,
return ret;
}
-static int __paes_set_key(struct s390_paes_ctx *ctx)
+static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
{
- unsigned long fc;
+ struct pkey_protkey pkey;
- if (__paes_convert_key(&ctx->kb, &ctx->pk))
+ if (__paes_keyblob2pkey(&ctx->kb, &pkey))
return -EINVAL;
- /* Pick the correct function code based on the protected key type */
- fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
- (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 :
- (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0;
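+ /* install the freshly converted protected key under the pk_lock */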
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(&ctx->pk, &pkey, sizeof(pkey));
+ spin_unlock_bh(&ctx->pk_lock);
- /* Check if the function code is available */
- ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
-
- return ctx->fc ? 0 : -EINVAL;
+ return 0;
}
static int ecb_paes_init(struct crypto_tfm *tfm)
@@ -128,6 +127,7 @@ static int ecb_paes_init(struct crypto_tfm *tfm)
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->kb.key = NULL;
+ spin_lock_init(&ctx->pk_lock);
return 0;
}
@@ -139,6 +139,24 @@ static void ecb_paes_exit(struct crypto_tfm *tfm)
_free_kb_keybuf(&ctx->kb);
}
+static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx)
+{
+ unsigned long fc;
+
+ if (__paes_convert_key(ctx))
+ return -EINVAL;
+
+ /* Pick the correct function code based on the protected key type */
+ fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
+ (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 :
+ (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0;
+
+ /* Check if the function code is available */
+ ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+
+ return ctx->fc ? 0 : -EINVAL;
+}
+
static int ecb_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -150,9 +168,10 @@ static int ecb_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
if (rc)
return rc;
- if (__paes_set_key(ctx)) {
+ if (__ecb_paes_set_key(ctx)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
return 0;
}
@@ -164,18 +183,31 @@ static int ecb_paes_crypt(struct blkcipher_desc *desc,
struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int nbytes, n, k;
int ret;
+ struct {
+ u8 key[MAXPROTKEYSIZE];
+ } param;
ret = blkcipher_walk_virt(desc, walk);
+ if (ret)
+ return ret;
+
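+ /* use a local copy of the protected key; ctx->pk may be updated concurrently */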
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ spin_unlock_bh(&ctx->pk_lock);
+
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
- k = cpacf_km(ctx->fc | modifier, ctx->pk.protkey,
+ k = cpacf_km(ctx->fc | modifier, &param,
walk->dst.virt.addr, walk->src.virt.addr, n);
if (k)
ret = blkcipher_walk_done(desc, walk, nbytes - k);
if (k < n) {
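+ /* protected key seems no longer valid, convert the key blob again */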
- if (__paes_set_key(ctx) != 0)
+ if (__paes_convert_key(ctx))
return blkcipher_walk_done(desc, walk, -EIO);
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ spin_unlock_bh(&ctx->pk_lock);
}
}
return ret;
@@ -229,6 +261,7 @@ static int cbc_paes_init(struct crypto_tfm *tfm)
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->kb.key = NULL;
+ spin_lock_init(&ctx->pk_lock);
return 0;
}
@@ -240,11 +273,11 @@ static void cbc_paes_exit(struct crypto_tfm *tfm)
_free_kb_keybuf(&ctx->kb);
}
-static int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
+static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
{
unsigned long fc;
- if (__paes_convert_key(&ctx->kb, &ctx->pk))
+ if (__paes_convert_key(ctx))
return -EINVAL;
/* Pick the correct function code based on the protected key type */
@@ -288,22 +321,31 @@ static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
} param;
ret = blkcipher_walk_virt(desc, walk);
+ if (ret)
+ return ret;
+
memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
+ spin_lock_bh(&ctx->pk_lock);
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ spin_unlock_bh(&ctx->pk_lock);
+
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
k = cpacf_kmc(ctx->fc | modifier, &param,
walk->dst.virt.addr, walk->src.virt.addr, n);
- if (k)
+ if (k) {
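+ /* copy the updated chaining value back into the walk IV */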
+ memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, nbytes - k);
+ }
if (k < n) {
- if (__cbc_paes_set_key(ctx) != 0)
+ if (__paes_convert_key(ctx))
return blkcipher_walk_done(desc, walk, -EIO);
+ spin_lock_bh(&ctx->pk_lock);
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ spin_unlock_bh(&ctx->pk_lock);
}
}
- memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
return ret;
}
@@ -357,6 +399,7 @@ static int xts_paes_init(struct crypto_tfm *tfm)
ctx->kb[0].key = NULL;
ctx->kb[1].key = NULL;
+ spin_lock_init(&ctx->pk_lock);
return 0;
}
@@ -369,12 +412,27 @@ static void xts_paes_exit(struct crypto_tfm *tfm)
_free_kb_keybuf(&ctx->kb[1]);
}
-static int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
+static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx)
+{
+ struct pkey_protkey pkey0, pkey1;
+
+ if (__paes_keyblob2pkey(&ctx->kb[0], &pkey0) ||
+ __paes_keyblob2pkey(&ctx->kb[1], &pkey1))
+ return -EINVAL;
+
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(&ctx->pk[0], &pkey0, sizeof(pkey0));
+ memcpy(&ctx->pk[1], &pkey1, sizeof(pkey1));
+ spin_unlock_bh(&ctx->pk_lock);
+
+ return 0;
+}
+
+static inline int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
{
unsigned long fc;
- if (__paes_convert_key(&ctx->kb[0], &ctx->pk[0]) ||
- __paes_convert_key(&ctx->kb[1], &ctx->pk[1]))
+ if (__xts_paes_convert_key(ctx))
return -EINVAL;
if (ctx->pk[0].type != ctx->pk[1].type)
@@ -449,15 +507,19 @@ static int xts_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
} xts_param;
ret = blkcipher_walk_virt(desc, walk);
+ if (ret)
+ return ret;
+
keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;
-retry:
+
memset(&pcc_param, 0, sizeof(pcc_param));
memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
+ spin_lock_bh(&ctx->pk_lock);
memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
- cpacf_pcc(ctx->fc, pcc_param.key + offset);
-
memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen);
+ spin_unlock_bh(&ctx->pk_lock);
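+ /* compute the initial XTS parameter from the tweak */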
+ cpacf_pcc(ctx->fc, pcc_param.key + offset);
memcpy(xts_param.init, pcc_param.xts, 16);
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
@@ -468,11 +530,15 @@ static int xts_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
if (k)
ret = blkcipher_walk_done(desc, walk, nbytes - k);
if (k < n) {
- if (__xts_paes_set_key(ctx) != 0)
+ if (__xts_paes_convert_key(ctx))
return blkcipher_walk_done(desc, walk, -EIO);
- goto retry;
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(xts_param.key + offset,
+ ctx->pk[0].protkey, keylen);
+ spin_unlock_bh(&ctx->pk_lock);
}
}
+
return ret;
}
@@ -525,6 +591,7 @@ static int ctr_paes_init(struct crypto_tfm *tfm)
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->kb.key = NULL;
+ spin_lock_init(&ctx->pk_lock);
return 0;
}
@@ -536,11 +603,11 @@ static void ctr_paes_exit(struct crypto_tfm *tfm)
_free_kb_keybuf(&ctx->kb);
}
-static int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
+static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
{
unsigned long fc;
- if (__paes_convert_key(&ctx->kb, &ctx->pk))
+ if (__paes_convert_key(ctx))
return -EINVAL;
/* Pick the correct function code based on the protected key type */
@@ -595,47 +662,62 @@ static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
u8 buf[AES_BLOCK_SIZE], *ctrptr;
unsigned int nbytes, n, k;
int ret, locked;
-
- locked = spin_trylock(&ctrblk_lock);
+ struct {
+ u8 key[MAXPROTKEYSIZE];
+ } param;
ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
+ if (ret)
+ return ret;
+
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ spin_unlock_bh(&ctx->pk_lock);
+
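+ /* try to lock the shared ctr block buffer for multi block operation */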
+ locked = mutex_trylock(&ctrblk_lock);
+
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
n = AES_BLOCK_SIZE;
if (nbytes >= 2*AES_BLOCK_SIZE && locked)
n = __ctrblk_init(ctrblk, walk->iv, nbytes);
ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
- k = cpacf_kmctr(ctx->fc | modifier, ctx->pk.protkey,
- walk->dst.virt.addr, walk->src.virt.addr,
- n, ctrptr);
+ k = cpacf_kmctr(ctx->fc | modifier, &param, walk->dst.virt.addr,
+ walk->src.virt.addr, n, ctrptr);
if (k) {
if (ctrptr == ctrblk)
memcpy(walk->iv, ctrptr + k - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
crypto_inc(walk->iv, AES_BLOCK_SIZE);
- ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ ret = blkcipher_walk_done(desc, walk, nbytes - k);
}
if (k < n) {
- if (__ctr_paes_set_key(ctx) != 0) {
+ if (__paes_convert_key(ctx)) {
if (locked)
- spin_unlock(&ctrblk_lock);
+ mutex_unlock(&ctrblk_lock);
return blkcipher_walk_done(desc, walk, -EIO);
}
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ spin_unlock_bh(&ctx->pk_lock);
}
}
if (locked)
- spin_unlock(&ctrblk_lock);
+ mutex_unlock(&ctrblk_lock);
/*
* final block may be < AES_BLOCK_SIZE, copy only nbytes
*/
if (nbytes) {
while (1) {
- if (cpacf_kmctr(ctx->fc | modifier,
- ctx->pk.protkey, buf,
+ if (cpacf_kmctr(ctx->fc | modifier, &param, buf,
walk->src.virt.addr, AES_BLOCK_SIZE,
walk->iv) == AES_BLOCK_SIZE)
break;
- if (__ctr_paes_set_key(ctx) != 0)
+ if (__paes_convert_key(ctx))
return blkcipher_walk_done(desc, walk, -EIO);
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ spin_unlock_bh(&ctx->pk_lock);
}
memcpy(walk->dst.virt.addr, buf, nbytes);
crypto_inc(walk->iv, AES_BLOCK_SIZE);