Herbert Xu wrote:
> The important thing for now is not which fallback you pick, but how
> you end up invoking them. I want to see the relationship between
> your driver and the fallback occur strictly at the crypto layer, not
> any lower :)
Attached. In module_init() I call crypto_alloc_tfm() and hold the TFM for
the whole lifetime of the module, to ensure the fallback modules stay
available. For now it fails if either sha1 or sha256 can't be loaded, but
this will be made configurable. From this allocated TFM I learn the module
name, which was chosen by priority before padlock registered its own
algorithms, and I use that name later on to get the fallback TFMs. For that
I introduced crypto_alloc_tfm_by_module() into crypto/api.c.

It works pretty well. The only problem is when I add "alias sha1 padlock"
to /etc/modprobe.conf - then "modprobe padlock" deadlocks: padlock.ko needs
sha1, which resolves to padlock.ko again, and modprobe gets stuck. Fixing
that would probably need a modified modprobe that either fails on circular
dependencies or can be forced to ignore modprobe.conf. Anyway, that's not a
real issue right now.

> On the other hand, any idea on when VIA will have a partial update
> interface?

Hopefully in the next CPU version, but unfortunately not in the next
steppings of the C7. I complained loudly about this always-finalizing
"feature" :-)

Do you like these patches more? Any comments?

Michal Ludvig
-- 
* Personal homepage: http://www.logix.cz/michal
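PS: Before the patch itself, a minimal sketch of the fallback scheme
described above, for illustration only. It is not part of the patch;
sha1_fallback() is a made-up name, while crypto_tfm_alg_driver_name() and
crypto_alloc_tfm_by_module() are the helpers the patch introduces:

#include <linux/crypto.h>

static struct crypto_tfm *tfm_sha1;	/* held since module_init() */

static struct crypto_tfm *sha1_fallback(void)
{
	/* The long-lived TFM remembers which driver won the
	 * priority-based selection before padlock registered
	 * its own "sha1"... */
	const char *drv = crypto_tfm_alg_driver_name(tfm_sha1);

	/* ...and pinning later allocations to that driver ensures
	 * the lookup never hands padlock back its own algorithm. */
	return crypto_alloc_tfm_by_module("sha1", 0, drv);
}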
Index: crypto-2.6/crypto/api.c
===================================================================
--- crypto-2.6.orig/crypto/api.c
+++ crypto-2.6/crypto/api.c
@@ -39,7 +39,8 @@ static inline void crypto_alg_put(struct
 	module_put(alg->cra_module);
 }
 
-static struct crypto_alg *crypto_alg_lookup(const char *name)
+static struct crypto_alg *crypto_alg_lookup(const char *name,
+					    const char *module)
 {
 	struct crypto_driver_list *dl;
 	struct crypto_alg *q, *alg = NULL;
@@ -57,6 +58,10 @@ static struct crypto_alg *crypto_alg_loo
 		best = 0;
 
 		list_for_each_entry(q, &dl->driver_list, cra_list) {
+			if (module && strcmp(q->cra_driver_name,
+					     module))
+				continue;
+
 			if (q->cra_priority >= best &&
 			    crypto_alg_get(q)) {
 				best = q->cra_priority;
 				if (alg)
@@ -73,9 +78,11 @@ static struct crypto_alg *crypto_alg_loo
 
 /* A far more intelligent version of this is planned. For now, just
  * try an exact match on the name of the algorithm. */
-static inline struct crypto_alg *crypto_alg_mod_lookup(const char *name)
+static inline struct crypto_alg *crypto_alg_mod_lookup(const char *name,
+						       const char *module)
 {
-	return try_then_request_module(crypto_alg_lookup(name), name);
+	return try_then_request_module(crypto_alg_lookup(name, module),
+				       module ? module : name);
 }
 
 static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags)
@@ -166,16 +173,11 @@ static unsigned int crypto_ctxsize(struc
 	return len + alg->cra_alignmask;
 }
 
-struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
+static struct crypto_tfm *crypto_alloc_tfm_alg(struct crypto_alg *alg, u32 flags)
 {
 	struct crypto_tfm *tfm = NULL;
-	struct crypto_alg *alg;
 	unsigned int tfm_size;
 
-	alg = crypto_alg_mod_lookup(name);
-	if (alg == NULL)
-		goto out;
-
 	tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, flags);
 	tfm = kmalloc(tfm_size, GFP_KERNEL);
 	if (tfm == NULL)
@@ -204,6 +206,28 @@ out:
 	return tfm;
 }
 
+struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
+{
+	struct crypto_alg *alg;
+
+	alg = crypto_alg_mod_lookup(name, NULL);
+	if (alg == NULL)
+		return NULL;
+
+	return crypto_alloc_tfm_alg(alg, flags);
+}
+
+struct crypto_tfm *crypto_alloc_tfm_by_module(const char *name, u32 flags, const char *module)
+{
+	struct crypto_alg *alg;
+
+	alg = crypto_alg_mod_lookup(name, module);
+	if (alg == NULL)
+		return NULL;
+
+	return crypto_alloc_tfm_alg(alg, flags);
+}
+
 void crypto_free_tfm(struct crypto_tfm *tfm)
 {
 	struct crypto_alg *alg;
@@ -236,6 +260,9 @@ int crypto_register_alg(struct crypto_al
 	if (alg->cra_blocksize > PAGE_SIZE)
 		return -EINVAL;
 
+	if (!alg->cra_driver_name[0])
+		strncpy(alg->cra_driver_name, alg->cra_module->name, sizeof(alg->cra_driver_name));
+
 	down_write(&crypto_alg_sem);
 
 	list_for_each_entry(dl, &crypto_alg_list, alg_list) {
@@ -303,7 +330,7 @@ int crypto_unregister_alg(struct crypto_
 int crypto_alg_available(const char *name, u32 flags)
 {
 	int ret = 0;
-	struct crypto_alg *alg = crypto_alg_mod_lookup(name);
+	struct crypto_alg *alg = crypto_alg_mod_lookup(name, NULL);
 
 	if (alg) {
 		crypto_alg_put(alg);
@@ -325,5 +352,6 @@ __initcall(init_crypto);
 EXPORT_SYMBOL_GPL(crypto_register_alg);
 EXPORT_SYMBOL_GPL(crypto_unregister_alg);
 EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
+EXPORT_SYMBOL_GPL(crypto_alloc_tfm_by_module);
 EXPORT_SYMBOL_GPL(crypto_free_tfm);
 EXPORT_SYMBOL_GPL(crypto_alg_available);
Index: crypto-2.6/include/linux/crypto.h
===================================================================
--- crypto-2.6.orig/include/linux/crypto.h
+++ crypto-2.6/include/linux/crypto.h
@@ -131,7 +131,7 @@ struct crypto_alg {
 	unsigned int cra_priority;
 
 	const char cra_name[CRYPTO_MAX_ALG_NAME];
-	const char cra_driver_name[CRYPTO_MAX_ALG_NAME];
+	char cra_driver_name[CRYPTO_MAX_ALG_NAME];
 
 	union {
 		struct cipher_alg cipher;
@@ -245,6 +245,8 @@ struct crypto_tfm {
  * then drops the refcount on the associated algorithm.
  */
 struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags);
+struct crypto_tfm *crypto_alloc_tfm_by_module(const char *alg_name, u32 tfm_flags,
+					      const char *module_name);
 void crypto_free_tfm(struct crypto_tfm *tfm);
 
 /*
@@ -255,6 +257,11 @@ static inline const char *crypto_tfm_alg
 	return tfm->__crt_alg->cra_name;
 }
 
+static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
+{
+	return tfm->__crt_alg->cra_driver_name;
+}
+
 static inline const char *crypto_tfm_alg_modname(struct crypto_tfm *tfm)
 {
 	return module_name(tfm->__crt_alg->cra_module);
Index: crypto-2.6/drivers/crypto/padlock-sha.c
===================================================================
--- /dev/null
+++ crypto-2.6/drivers/crypto/padlock-sha.c
@@ -0,0 +1,245 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for VIA PadLock hardware crypto engine.
+ *
+ * Copyright (c) 2004  Michal Ludvig <[EMAIL PROTECTED]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/scatterlist.h>
+#include <asm/byteorder.h>
+#include "padlock.h"
+
+#define SHA1_DIGEST_SIZE	20
+#define SHA1_HMAC_BLOCK_SIZE	64
+
+#define SHA256_DIGEST_SIZE	32
+#define SHA256_HMAC_BLOCK_SIZE	64
+
+struct padlock_sha_ctx {
+	char		*data;
+	size_t		used;
+	int		bypass;
+	void		(*f_sha_padlock)(const char *in, char *out, int count);
+	const char	*algname;
+	struct crypto_tfm *tfm;
+};
+
+#define CTX(ctx)	((struct padlock_sha_ctx *)(ctx))
+
+static struct crypto_tfm *tfm_sha1, *tfm_sha256;
+static struct crypto_alg sha1_alg, sha256_alg;
+
+static void padlock_sha_bypass(void *ctx)
+{
+	if (CTX(ctx)->bypass)
+		return;
+
+	/* We're attempting to use ALG from a module of the same name,
+	 * e.g. sha1 algo from sha1.ko. This could be more intelligent and
+	 * allow e.g. the sha1-i586 module to be used instead. Hmm, maybe later.
+	 *
+	 * BTW we assume we get a valid TFM. There is no error path from
+	 * digest.dia_init().
+	 */
+	CTX(ctx)->tfm = crypto_alloc_tfm_by_module(CTX(ctx)->algname, 0,
+						   CTX(ctx)->algname);
+	BUG_ON(!CTX(ctx)->tfm);
+
+	crypto_digest_init(CTX(ctx)->tfm);
+	if (CTX(ctx)->data && CTX(ctx)->used) {
+		struct scatterlist sg[8];
+
+		sg_set_buf(&sg[0], CTX(ctx)->data, CTX(ctx)->used);
+		crypto_digest_update(CTX(ctx)->tfm, sg, 1);
+	}
+
+	free_page((unsigned long)(CTX(ctx)->data));
+	CTX(ctx)->data = NULL;
+	CTX(ctx)->used = 0;
+	CTX(ctx)->bypass = 1;
+}
+
+static void padlock_sha_init(void *ctx)
+{
+	CTX(ctx)->used = 0;
+	CTX(ctx)->bypass = 0;
+	CTX(ctx)->data = (char *)__get_free_page(GFP_KERNEL);
+	if (!CTX(ctx)->data)
+		padlock_sha_bypass(ctx);
+	return;
+}
+
+static void padlock_sha_update(void *ctx, const uint8_t *data, unsigned int length)
+{
+	if (unlikely(!CTX(ctx)->bypass && (CTX(ctx)->used + length > PAGE_SIZE)))
+		padlock_sha_bypass(ctx);
+
+	if (unlikely(CTX(ctx)->bypass)) {
+		struct scatterlist sg[8];
+		BUG_ON(!CTX(ctx)->tfm);
+		sg_set_buf(&sg[0], data, length);
+		crypto_digest_update(CTX(ctx)->tfm, sg, 1);
+		goto out;
+	}
+
+	memcpy(CTX(ctx)->data + CTX(ctx)->used, data, length);
+	CTX(ctx)->used += length;
+
+out:
+	return;
+}
+
+void padlock_do_sha1(const char *in, char *out, int count)
+{
+	BUG();
+}
+
+void padlock_do_sha256(const char *in, char *out, int count)
+{
+	BUG();
+}
+
+static void padlock_sha_final(void *ctx, uint8_t *out)
+{
+	padlock_sha_bypass(ctx);
+
+	if (unlikely(CTX(ctx)->bypass)) {
+		BUG_ON(!CTX(ctx)->tfm);
+		crypto_digest_final(CTX(ctx)->tfm, out);
+		crypto_free_tfm(CTX(ctx)->tfm);
+		CTX(ctx)->tfm = NULL;
+		CTX(ctx)->bypass = 0;
+		return;
+	}
+
+	/* Pass the input buffer to PadLock microcode... */
+	CTX(ctx)->f_sha_padlock(CTX(ctx)->data, out, CTX(ctx)->used);
+
+	if (CTX(ctx)->data) {
+		free_page((unsigned long)(CTX(ctx)->data));
+		CTX(ctx)->data = NULL;
+	}
+
+	CTX(ctx)->used = 0;
+}
+
+static void padlock_sha1_init(void *ctx)
+{
+	CTX(ctx)->f_sha_padlock = padlock_do_sha1;
+	CTX(ctx)->algname = sha1_alg.cra_name;
+
+	padlock_sha_init(ctx);
+}
+
+static void padlock_sha256_init(void *ctx)
+{
+	CTX(ctx)->f_sha_padlock = padlock_do_sha256;
+	CTX(ctx)->algname = sha256_alg.cra_name;
+
+	padlock_sha_init(ctx);
+}
+
+static struct crypto_alg sha1_alg = {
+	.cra_name		= "sha1",
+	.cra_driver_name	= "padlock",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_DIGEST,
+	.cra_blocksize		= SHA1_HMAC_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct padlock_sha_ctx),
+	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(sha1_alg.cra_list),
+	.cra_u			= {
+		.digest = {
+			.dia_digestsize	= SHA1_DIGEST_SIZE,
+			.dia_init	= padlock_sha1_init,
+			.dia_update	= padlock_sha_update,
+			.dia_final	= padlock_sha_final,
+		}
+	}
+};
+
+static struct crypto_alg sha256_alg = {
+	.cra_name		= "sha256",
+	.cra_driver_name	= "padlock",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_DIGEST,
+	.cra_blocksize		= SHA256_HMAC_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct padlock_sha_ctx),
+	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(sha256_alg.cra_list),
+	.cra_u			= {
+		.digest = {
+			.dia_digestsize	= SHA256_DIGEST_SIZE,
+			.dia_init	= padlock_sha256_init,
+			.dia_update	= padlock_sha_update,
+			.dia_final	= padlock_sha_final,
+		}
+	}
+};
+
+int __init padlock_init_sha(void)
+{
+	int rc = -ENOENT;
+
+	/* We'll hold one TFM for each fallback
+	 * to ensure the modules are loaded and available. */
+	tfm_sha1 = crypto_alloc_tfm(sha1_alg.cra_name, 0);
+	if (!tfm_sha1) {
+		printk(KERN_WARNING PFX "Couldn't load fallback module for %s.\n",
+		       sha1_alg.cra_name);
+		goto out;
+	}
+	printk(KERN_NOTICE PFX "Got TFM for %s from module %s\n",
+	       sha1_alg.cra_name, crypto_tfm_alg_driver_name(tfm_sha1));
+
+	tfm_sha256 = crypto_alloc_tfm(sha256_alg.cra_name, 0);
+	if (!tfm_sha256) {
+		printk(KERN_WARNING PFX "Couldn't load fallback module for %s.\n",
+		       sha256_alg.cra_name);
+		goto out_free1;
+	}
+	printk(KERN_NOTICE PFX "Got TFM for %s from module %s\n",
+	       sha256_alg.cra_name, crypto_tfm_alg_driver_name(tfm_sha256));
+
+	rc = crypto_register_alg(&sha1_alg);
+	if (rc)
+		goto out_free256;
+
+	rc = crypto_register_alg(&sha256_alg);
+	if (rc)
+		goto out_unreg1;
+
+	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");
+
+	return 0;
+
+out_unreg1:
+	crypto_unregister_alg(&sha1_alg);
+out_free256:
+	crypto_free_tfm(tfm_sha256);
+out_free1:
+	crypto_free_tfm(tfm_sha1);
+out:
+	return rc;
+}
+
+void __exit padlock_fini_sha(void)
+{
+	crypto_free_tfm(tfm_sha1);
+	crypto_free_tfm(tfm_sha256);
+	crypto_unregister_alg(&sha1_alg);
+	crypto_unregister_alg(&sha256_alg);
+}
Index: crypto-2.6/drivers/crypto/Kconfig
===================================================================
--- crypto-2.6.orig/drivers/crypto/Kconfig
+++ crypto-2.6/drivers/crypto/Kconfig
@@ -20,4 +20,11 @@ config CRYPTO_DEV_PADLOCK_AES
 	help
 	  Use VIA PadLock for AES algorithm.
 
+config CRYPTO_DEV_PADLOCK_SHA
+	bool "Support for SHA1/SHA256 in VIA PadLock"
+	depends on CRYPTO_DEV_PADLOCK
+	default y
+	help
+	  Use VIA PadLock for SHA1/SHA256 algorithms.
+
 endmenu
Index: crypto-2.6/drivers/crypto/Makefile
===================================================================
--- crypto-2.6.orig/drivers/crypto/Makefile
+++ crypto-2.6/drivers/crypto/Makefile
@@ -2,6 +2,7 @@
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK) += padlock.o
 
 padlock-objs-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
+padlock-objs-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
 
 padlock-objs := padlock-generic.o $(padlock-objs-y)
Index: crypto-2.6/drivers/crypto/padlock-generic.c
===================================================================
--- crypto-2.6.orig/drivers/crypto/padlock-generic.c
+++ crypto-2.6/drivers/crypto/padlock-generic.c
@@ -24,6 +24,7 @@ padlock_init(void)
 {
 	int ret = -ENOSYS;
 
+#if 0
 	if (!cpu_has_xcrypt) {
 		printk(KERN_ERR PFX "VIA PadLock not detected.\n");
 		return -ENODEV;
@@ -33,6 +34,7 @@ padlock_init(void)
 		printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
 		return -ENODEV;
 	}
+#endif
 
 #ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES
 	if ((ret = padlock_init_aes())) {
@@ -41,6 +43,13 @@ padlock_init(void)
 	}
 #endif
 
+#ifdef CONFIG_CRYPTO_DEV_PADLOCK_SHA
+	if ((ret = padlock_init_sha())) {
+		printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
+		return ret;
+	}
+#endif
+
 	if (ret == -ENOSYS)
 		printk(KERN_ERR PFX "Hmm, VIA PadLock was compiled without any algorithm.\n");
 
@@ -53,6 +62,10 @@ padlock_fini(void)
 #ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES
 	padlock_fini_aes();
 #endif
+
+#ifdef CONFIG_CRYPTO_DEV_PADLOCK_SHA
+	padlock_fini_sha();
+#endif
 }
 
 module_init(padlock_init);
Index: crypto-2.6/drivers/crypto/padlock.h
===================================================================
--- crypto-2.6.orig/drivers/crypto/padlock.h
+++ crypto-2.6/drivers/crypto/padlock.h
@@ -33,4 +33,9 @@ int padlock_init_aes(void);
 void padlock_fini_aes(void);
 #endif
 
+#ifdef CONFIG_CRYPTO_DEV_PADLOCK_SHA
+int padlock_init_sha(void);
+void padlock_fini_sha(void);
+#endif
+
 #endif	/* _CRYPTO_PADLOCK_H */