Herbert Xu wrote:
> Hi Michal:
> 
> On Mon, May 22, 2006 at 02:15:07PM +1200, Michal Ludvig wrote:
>> we should be able to autoload modules via their cra_driver_name, not
>> only by the module name. I added a few MODULE_ALIAS()es, for now only
>> for aes.ko which sets its driver-name and for SHA1/256 modules that I
>> need as fallbacks for padlock-sha.
> 
> I don't see anything wrong with the aliases.  However, I doubt this is
> what you really want for padlock-sha.
> 
> After all, what you need is a SHA fallback, not this one in particular.
> For instance, if we ever have an assembly version of SHA, you want that
> one instead of sha1-generic.

That's configurable with module parameters. It defaults to sha1-generic,
but you can easily add sha1_fallback=sha1-i586 to your modprobe command.
To use padlock or sha1-i586 by default you'll have to edit
/etc/modprobe.conf anyway, so it's not a big deal to add some parameters
there as well.
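
For example (assuming the module ends up loaded as "padlock", which is
what the Makefile below produces - adjust the name if your build differs):

	options padlock sha1_fallback=sha1-i586 sha256_fallback=sha256-generic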

> So you want to find it as something like a "sha1" algorithm with priority
> less than your own.

I've got a patch that does this (attached FYI), but I reverted it from my
tree in favor of specifying the fallback module by name.
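
For reference, with that patch the fallback allocation in padlock-sha
would have looked roughly like this (just a sketch - the function and
constants are from the attached patches, error handling omitted):

	/* "Give me the best sha1 with priority lower than mine",
	 * which conveniently can never resolve to padlock-sha itself. */
	fallback = crypto_alloc_tfm_by_priority("sha1", 0,
						PADLOCK_CRA_PRIORITY,
						CRA_PRIO_LOWER_THAN);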

> BTW, for the >1 page case you could use vmap instead of falling back.

Hm, yes. Anyway - what if the kernel tried to hash a _really_ large
amount of data? The fallback option is cheap, and I don't think it will
be used much. I'll do some measurements on how much data is usually
hashed - I guess both IPsec on Ethernet and dm-crypt will fit into one
page; IPsec with jumbo frames could use up to three pages. The buffer
size could be made configurable as a parameter as well
(buffer_pages=NNN). Is anything else likely to use larger buffers? I
expect the fallback path to stay a "just-in-case" option.

Attached are two patches - one that allows selecting a crypto_alg by
priority, and the PadLock-SHA driver, which doesn't actually use the
priority stuff but instead uses driver names to select the fallback ;-)

BTW the HW parts of PadLock are still missing, so you can apply it,
deselect padlock-aes, and compile and test (e.g. with tcrypt) on any x86
machine.

Michal
Index: linux/crypto/api.c
===================================================================
--- linux.orig/crypto/api.c
+++ linux/crypto/api.c
@@ -39,7 +39,28 @@ static inline void crypto_alg_put(struct
 	module_put(alg->cra_module);
 }
 
-static struct crypto_alg *crypto_alg_lookup(const char *name)
+static inline int prio_compare(int req, int best, int cur,
+			       enum prio_flags flags)
+{
+	switch (flags) {
+	case CRA_PRIO_LOWEST:
+		return best == -1 || cur < best;	/* -1: no match yet */
+	case CRA_PRIO_LOWER_THAN:
+		return cur < req && cur > best;
+	case CRA_PRIO_EXACT:
+		return cur == req;
+	case CRA_PRIO_HIGHER_THAN:
+		return cur > best && cur > req;
+	case CRA_PRIO_HIGHEST:
+		return cur > best;
+	default:
+		BUG();
+	}
+	return 0;
+}
+
+static struct crypto_alg *crypto_alg_lookup_by_priority(const char *name,
+			int prio_requested, enum prio_flags prio_flags)
 {
 	struct crypto_alg *q, *alg = NULL;
 	int best = -1;
@@ -54,7 +75,8 @@ static struct crypto_alg *crypto_alg_loo
 
 		exact = !strcmp(q->cra_driver_name, name);
 		fuzzy = !strcmp(q->cra_name, name);
-		if (!exact && !(fuzzy && q->cra_priority > best))
+		if (!exact && !(fuzzy && prio_compare(prio_requested, best,
+						      q->cra_priority, prio_flags)))
 			continue;
 
 		if (unlikely(!crypto_alg_get(q)))
@@ -73,6 +95,11 @@ static struct crypto_alg *crypto_alg_loo
 	return alg;
 }
 
+static struct crypto_alg *crypto_alg_lookup(const char *name)
+{
+	return crypto_alg_lookup_by_priority(name, 0, CRA_PRIO_HIGHEST);
+}
+
 /* A far more intelligent version of this is planned.  For now, just
  * try an exact match on the name of the algorithm. */
 static inline struct crypto_alg *crypto_alg_mod_lookup(const char *name)
@@ -168,16 +195,12 @@ static unsigned int crypto_ctxsize(struc
 	return len + alg->cra_alignmask;
 }
 
-struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
+static struct crypto_tfm *crypto_alloc_tfm_from_alg(struct crypto_alg *alg,
+						    u32 flags)
 {
 	struct crypto_tfm *tfm = NULL;
-	struct crypto_alg *alg;
 	unsigned int tfm_size;
 
-	alg = crypto_alg_mod_lookup(name);
-	if (alg == NULL)
-		goto out;
-
 	tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, flags);
 	tfm = kmalloc(tfm_size, GFP_KERNEL);
 	if (tfm == NULL)
@@ -206,6 +229,36 @@ out:
 	return tfm;
 }
 
+struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
+{
+	struct crypto_alg *alg;
+	struct crypto_tfm *tfm = NULL;
+
+	alg = crypto_alg_mod_lookup(name);
+	if (alg == NULL)
+		goto out;
+	tfm = crypto_alloc_tfm_from_alg(alg, flags);
+
+out:
+	return tfm;
+}
+
+struct crypto_tfm *crypto_alloc_tfm_by_priority(const char *name,
+		u32 flags, int priority, enum prio_flags prio_flags)
+{
+	struct crypto_alg *alg;
+	struct crypto_tfm *tfm = NULL;
+
+	alg = crypto_alg_lookup_by_priority(name, priority, prio_flags);
+	if (alg == NULL)
+		goto out;
+
+	tfm = crypto_alloc_tfm_from_alg(alg, flags);
+
+out:
+	return tfm;
+}
+
 void crypto_free_tfm(struct crypto_tfm *tfm)
 {
 	struct crypto_alg *alg;
@@ -321,5 +374,6 @@ __initcall(init_crypto);
 EXPORT_SYMBOL_GPL(crypto_register_alg);
 EXPORT_SYMBOL_GPL(crypto_unregister_alg);
 EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
+EXPORT_SYMBOL_GPL(crypto_alloc_tfm_by_priority);
 EXPORT_SYMBOL_GPL(crypto_free_tfm);
 EXPORT_SYMBOL_GPL(crypto_alg_available);
Index: linux/include/linux/crypto.h
===================================================================
--- linux.orig/include/linux/crypto.h
+++ linux/include/linux/crypto.h
@@ -247,6 +247,17 @@ struct crypto_tfm {
 struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags);
 void crypto_free_tfm(struct crypto_tfm *tfm);
 
+enum prio_flags {
+	CRA_PRIO_LOWEST		= 1,
+	CRA_PRIO_LOWER_THAN	= 2,
+	CRA_PRIO_EXACT		= 3,
+	CRA_PRIO_HIGHER_THAN	= 4,
+	CRA_PRIO_HIGHEST	= 5,
+};
+
+struct crypto_tfm *crypto_alloc_tfm_by_priority(const char *alg_name,
+	u32 tfm_flags, int priority, enum prio_flags prio_flags);
+
 /*
  * Transform helpers which query the underlying algorithm.
  */
Index: linux/drivers/crypto/padlock-sha.c
===================================================================
--- /dev/null
+++ linux/drivers/crypto/padlock-sha.c
@@ -0,0 +1,264 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for VIA PadLock hardware crypto engine.
+ *
+ * Copyright (c) 2006  Michal Ludvig <[EMAIL PROTECTED]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/scatterlist.h>
+#include <asm/byteorder.h>
+#include "padlock.h"
+
+#define PADLOCK_CRA_PRIORITY	300
+
+#define SHA1_DEFAULT_FALLBACK	"sha1-generic"
+#define SHA1_DIGEST_SIZE        20
+#define SHA1_HMAC_BLOCK_SIZE    64
+
+#define SHA256_DEFAULT_FALLBACK "sha256-generic"
+#define SHA256_DIGEST_SIZE      32
+#define SHA256_HMAC_BLOCK_SIZE  64
+
+static char *sha1_fallback = SHA1_DEFAULT_FALLBACK;
+static char *sha256_fallback = SHA256_DEFAULT_FALLBACK;
+
+module_param(sha1_fallback, charp, 0444);
+module_param(sha256_fallback, charp, 0444);
+
+MODULE_PARM_DESC(sha1_fallback, "Fallback driver for SHA1. Default is " SHA1_DEFAULT_FALLBACK);
+MODULE_PARM_DESC(sha256_fallback, "Fallback driver for SHA256. Default is " SHA256_DEFAULT_FALLBACK);
+
+struct padlock_sha_ctx {
+	char		*data;
+	size_t		used;
+	int		bypass;
+	void (*f_sha_padlock)(const char *in, char *out, int count);
+	const char	*fallback_driver_name;
+	struct crypto_tfm *fallback_tfm;
+};
+
+#define CTX(tfm)	((struct padlock_sha_ctx*)(crypto_tfm_ctx(tfm)))
+
+static struct crypto_tfm *tfm_sha1, *tfm_sha256;
+static struct crypto_alg sha1_alg, sha256_alg;
+
+static void padlock_sha_bypass(struct crypto_tfm *tfm)
+{
+	if (CTX(tfm)->bypass)
+		return;
+
+	/* Allocate the fallback TFM by its driver name, configurable
+	 * via the sha1_fallback/sha256_fallback module parameters
+	 * (e.g. sha1-i586 instead of sha1-generic).
+	 *
+	 * BTW we assume we get a valid TFM.  There is no error path
+	 * from digest.dia_init(), hence the BUG_ON() below.
+	 */
+	CTX(tfm)->fallback_tfm = crypto_alloc_tfm(CTX(tfm)->fallback_driver_name, 0);
+	BUG_ON(!CTX(tfm)->fallback_tfm);
+
+	crypto_digest_init(CTX(tfm)->fallback_tfm);
+	if (CTX(tfm)->data && CTX(tfm)->used) {
+		struct scatterlist sg[8];
+
+		sg_set_buf(&sg[0], CTX(tfm)->data, CTX(tfm)->used);
+		crypto_digest_update(CTX(tfm)->fallback_tfm, sg, 1);
+	}
+
+	free_page((unsigned long)(CTX(tfm)->data));
+	CTX(tfm)->data = NULL;
+	CTX(tfm)->used = 0;
+	CTX(tfm)->bypass = 1;
+}
+
+static void padlock_sha_init(struct crypto_tfm *tfm)
+{
+	CTX(tfm)->used = 0;
+	CTX(tfm)->bypass = 0;
+	CTX(tfm)->data = (char*)__get_free_page(GFP_KERNEL);
+	if (!CTX(tfm)->data)
+		padlock_sha_bypass(tfm);
+	return;
+}
+
+static void padlock_sha_update(struct crypto_tfm *tfm, const uint8_t *data, unsigned int length)
+{
+	if (unlikely(!CTX(tfm)->bypass && (CTX(tfm)->used + length > PAGE_SIZE)))
+		padlock_sha_bypass(tfm);
+
+	if (unlikely(CTX(tfm)->bypass)) {
+		struct scatterlist sg[8];
+		BUG_ON(!CTX(tfm)->fallback_tfm);
+		sg_set_buf(&sg[0], data, length);
+		crypto_digest_update(CTX(tfm)->fallback_tfm, sg, 1);
+		goto out;
+	}
+
+	memcpy(CTX(tfm)->data + CTX(tfm)->used, data, length);
+	CTX(tfm)->used += length;
+
+out:
+	return;
+}
+
+static void padlock_do_sha1(const char *in, char *out, int count)
+{
+	BUG();	/* PadLock HW path not implemented in this RFC */
+}
+
+static void padlock_do_sha256(const char *in, char *out, int count)
+{
+	BUG();	/* PadLock HW path not implemented in this RFC */
+}
+
+static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
+{
+	padlock_sha_bypass(tfm);	/* HW path missing - always use the fallback for now */
+
+	if (unlikely(CTX(tfm)->bypass)) {
+		BUG_ON(!CTX(tfm)->fallback_tfm);
+		crypto_digest_final(CTX(tfm)->fallback_tfm, out);
+		crypto_free_tfm(CTX(tfm)->fallback_tfm);
+		CTX(tfm)->fallback_tfm = NULL;
+		CTX(tfm)->bypass = 0;
+		return;
+	}
+
+	/* Pass the input buffer to PadLock microcode... */
+	CTX(tfm)->f_sha_padlock(CTX(tfm)->data, out, CTX(tfm)->used);
+
+	if (CTX(tfm)->data) {
+		free_page((unsigned long)(CTX(tfm)->data));
+		CTX(tfm)->data = NULL;
+	}
+
+	CTX(tfm)->used = 0;
+}
+
+static void padlock_sha1_init(struct crypto_tfm *tfm)
+{
+	CTX(tfm)->f_sha_padlock = padlock_do_sha1;
+	CTX(tfm)->fallback_driver_name = crypto_tfm_alg_driver_name(tfm_sha1);
+
+	padlock_sha_init(tfm);
+}
+
+static void padlock_sha256_init(struct crypto_tfm *tfm)
+{
+	CTX(tfm)->f_sha_padlock = padlock_do_sha256;
+	CTX(tfm)->fallback_driver_name = crypto_tfm_alg_driver_name(tfm_sha256);
+
+	padlock_sha_init(tfm);
+}
+
+static struct crypto_alg sha1_alg = {
+	.cra_name		=	"sha1",
+	.cra_driver_name	=	"sha1-padlock",
+	.cra_priority		=	PADLOCK_CRA_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_DIGEST,
+	.cra_blocksize		=	SHA1_HMAC_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
+	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(sha1_alg.cra_list),
+	.cra_u			=	{
+		.digest = {
+			.dia_digestsize	=	SHA1_DIGEST_SIZE,
+			.dia_init   	= 	padlock_sha1_init,
+			.dia_update 	=	padlock_sha_update,
+			.dia_final  	=	padlock_sha_final,
+		}
+	}
+};
+
+static struct crypto_alg sha256_alg = {
+	.cra_name		=	"sha256",
+	.cra_driver_name	=	"sha256-padlock",
+	.cra_priority		=	PADLOCK_CRA_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_DIGEST,
+	.cra_blocksize		=	SHA256_HMAC_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
+	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(sha256_alg.cra_list),
+	.cra_u			=	{
+		.digest = {
+			.dia_digestsize	=	SHA256_DIGEST_SIZE,
+			.dia_init   	= 	padlock_sha256_init,
+			.dia_update 	=	padlock_sha_update,
+			.dia_final  	=	padlock_sha_final,
+		}
+	}
+};
+
+int __init padlock_init_sha(void)
+{
+	int rc = -ENOENT;
+
+	/* We'll hold one TFM for each fallback
+	 * to ensure the modules are loaded and available. */
+	tfm_sha1 = crypto_alloc_tfm(sha1_fallback, 0);
+	if (!tfm_sha1) {
+		printk(KERN_WARNING PFX "Couldn't load fallback module for '%s'. Tried '%s'.\n",
+		       sha1_alg.cra_name, sha1_fallback);
+		goto out;
+	}
+	printk(KERN_NOTICE PFX "Fallback for '%s' is driver '%s' (prio=%d)\n", sha1_alg.cra_name,
+	       crypto_tfm_alg_driver_name(tfm_sha1), crypto_tfm_alg_priority(tfm_sha1));
+
+	tfm_sha256 = crypto_alloc_tfm(sha256_fallback, 0);
+	if (!tfm_sha256) {
+		printk(KERN_WARNING PFX "Couldn't load fallback module for '%s'. Tried '%s'.\n",
+		       sha256_alg.cra_name, sha256_fallback);
+		goto out_free1;
+	}
+	printk(KERN_NOTICE PFX "Fallback for '%s' is driver '%s' (prio=%d)\n", sha256_alg.cra_name,
+	       crypto_tfm_alg_driver_name(tfm_sha256), crypto_tfm_alg_priority(tfm_sha256));
+
+	rc = crypto_register_alg(&sha1_alg);
+	if (rc)
+		goto out_free256;
+
+	rc = crypto_register_alg(&sha256_alg);
+	if (rc)
+		goto out_unreg1;
+
+	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");
+
+	return 0;
+
+out_unreg1:
+	crypto_unregister_alg(&sha1_alg);
+out_free256:
+	crypto_free_tfm(tfm_sha256);
+out_free1:
+	crypto_free_tfm(tfm_sha1);
+out:
+	return rc;
+}
+
+void __exit padlock_fini_sha(void)
+{
+	crypto_free_tfm(tfm_sha1);
+	crypto_free_tfm(tfm_sha256);
+	crypto_unregister_alg(&sha1_alg);
+	crypto_unregister_alg(&sha256_alg);
+}
+
+MODULE_ALIAS("sha1-padlock");
+MODULE_ALIAS("sha256-padlock");
Index: linux/drivers/crypto/Kconfig
===================================================================
--- linux.orig/drivers/crypto/Kconfig
+++ linux/drivers/crypto/Kconfig
@@ -20,4 +20,11 @@ config CRYPTO_DEV_PADLOCK_AES
 	help
 	  Use VIA PadLock for AES algorithm.
 
+config CRYPTO_DEV_PADLOCK_SHA
+	bool "Support for SHA1/SHA256 in VIA PadLock"
+	depends on CRYPTO_DEV_PADLOCK
+	default y
+	help
+	  Use VIA PadLock for SHA1/SHA256 algorithms.
+
 endmenu
Index: linux/drivers/crypto/Makefile
===================================================================
--- linux.orig/drivers/crypto/Makefile
+++ linux/drivers/crypto/Makefile
@@ -2,6 +2,7 @@
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK) += padlock.o
 
 padlock-objs-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
+padlock-objs-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
 
 padlock-objs := padlock-generic.o $(padlock-objs-y)
 
Index: linux/drivers/crypto/padlock-generic.c
===================================================================
--- linux.orig/drivers/crypto/padlock-generic.c
+++ linux/drivers/crypto/padlock-generic.c
@@ -24,6 +24,7 @@ padlock_init(void)
 {
 	int ret = -ENOSYS;
 	
+#if 0	/* CPU detection disabled so this RFC can be tested on any x86 */
 	if (!cpu_has_xcrypt) {
 		printk(KERN_ERR PFX "VIA PadLock not detected.\n");
 		return -ENODEV;
@@ -33,6 +34,7 @@ padlock_init(void)
 		printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
 		return -ENODEV;
 	}
+#endif
 
 #ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES
 	if ((ret = padlock_init_aes())) {
@@ -41,6 +43,13 @@ padlock_init(void)
 	}
 #endif
 
+#ifdef CONFIG_CRYPTO_DEV_PADLOCK_SHA
+	if ((ret = padlock_init_sha())) {
+		printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
+		return ret;
+	}
+#endif
+
 	if (ret == -ENOSYS)
 		printk(KERN_ERR PFX "Hmm, VIA PadLock was compiled without any algorithm.\n");
 
@@ -53,6 +62,10 @@ padlock_fini(void)
 #ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES
 	padlock_fini_aes();
 #endif
+
+#ifdef CONFIG_CRYPTO_DEV_PADLOCK_SHA
+	padlock_fini_sha();
+#endif
 }
 
 module_init(padlock_init);
Index: linux/drivers/crypto/padlock.h
===================================================================
--- linux.orig/drivers/crypto/padlock.h
+++ linux/drivers/crypto/padlock.h
@@ -33,4 +33,9 @@ int padlock_init_aes(void);
 void padlock_fini_aes(void);
 #endif
 
+#ifdef CONFIG_CRYPTO_DEV_PADLOCK_SHA
+int padlock_init_sha(void);
+void padlock_fini_sha(void);
+#endif
+
 #endif	/* _CRYPTO_PADLOCK_H */
