On Sun, Sep 04, 2005 at 03:51:57PM +0300, Ronen Shitrit ([EMAIL PROTECTED]) wrote:
> Hi,
>
> Nice job (horoshaya rabota - "good work").
> Just two issues:
>
> -> I saw that this patch uses a global variable that counts the number
> of pending requests: dm_async_pending. I think this variable should be
> allocated per dm-crypt session, i.e. one per crypt_config, since there
> can be more than two consumers reading through dm-crypt.
> -> I think we might have a problem if a write operation is processed in
> parallel with a read operation: the read might wait for the write to
> complete, and dm_async_pending might also end up with wrong values.
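
Right - the counter now lives in struct crypt_config, one instance per
device. As a minimal sketch (field names as in the patch below), the
per-device accounting this implies looks like:

	wait_queue_head_t dm_async_queue;	/* in struct crypt_config */
	atomic_t dm_async_pending;

	/* submission side: account for one more in-flight request */
	atomic_inc(&cc->dm_async_pending);

	/* completion callback */
	atomic_dec(&cc->dm_async_pending);
	wake_up(&cc->dm_async_queue);

	/* before completing the bio */
	wait_event(cc->dm_async_queue,
		   atomic_read(&cc->dm_async_pending) == 0);

The patch below pre-counts all requests for a bio with atomic_set()
instead of incrementing one by one, but the idea is the same; the
atomic ops are what keeps the count sane when reads and writes are
converted in parallel.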

Updated patch attached.
Only compile-tested though - I will run tests tomorrow.

> Regards
> 
> Ronen Shitrit 
> Marvell Semiconductor Israel Ltd

diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -85,6 +85,11 @@ struct crypt_config {
        struct crypto_tfm *tfm;
        unsigned int key_size;
+
+#if defined CONFIG_ACRYPTO || defined CONFIG_ACRYPTO_MODULE
+       wait_queue_head_t dm_async_queue;
+       atomic_t dm_async_pending;      /* in-flight async requests */
+#endif
+
        u8 key[0];
 };
 
 #define MIN_IOS        256
@@ -230,6 +235,186 @@ static struct crypt_iv_operations crypt_
        .generator = crypt_iv_essiv_gen
 };
 
+#if defined CONFIG_ACRYPTO || defined CONFIG_ACRYPTO_MODULE
+
+#include <linux/acrypto.h>
+#include <linux/crypto_def.h>
+#include <asm/atomic.h>
+
+/* completion context for a single async crypto request */
+struct dm_async_private {
+       void                    *sg;    /* base of the whole allocation */
+       struct crypt_config     *cc;
+};
+
+static inline u16 crypto_tfm_get_type(struct crypto_tfm *tfm,
+               unsigned int key_size)
+{
+       u16 type;
+       const char *name = crypto_tfm_alg_name(tfm);
+
+       if (!strncmp(name, "aes", 3)) {
+               /*
+                * AES variants differ in key size; the block size is
+                * always 128 bits, so it cannot be used to tell them
+                * apart.
+                */
+               switch (key_size << 3) {
+               case 128:
+                       type = CRYPTO_TYPE_AES_128;
+                       break;
+               case 192:
+                       type = CRYPTO_TYPE_AES_192;
+                       break;
+               case 256:
+                       type = CRYPTO_TYPE_AES_256;
+                       break;
+               default:
+                       type = 0xffff;
+                       break;
+               }
+       } else if (!strcmp(name, "des3_ede"))
+               /* strncmp(..., 3) would also match plain "des" */
+               type = CRYPTO_TYPE_3DES;
+       else
+               type = 0xffff;
+
+       return type;
+}
+
+static inline u16 crypto_tfm_get_mode(struct crypto_tfm *tfm)
+{
+       u16 mode = tfm->crt_cipher.cit_mode & CRYPTO_TFM_MODE_MASK;
+
+       switch (mode) {
+       case CRYPTO_TFM_MODE_ECB:
+               mode = CRYPTO_MODE_ECB;
+               break;
+       case CRYPTO_TFM_MODE_CBC:
+               mode = CRYPTO_MODE_CBC;
+               break;
+       case CRYPTO_TFM_MODE_CFB:
+               mode = CRYPTO_MODE_CFB;
+               break;
+       case CRYPTO_TFM_MODE_CTR:
+               mode = CRYPTO_MODE_CTR;
+               break;
+       default:
+               mode = 0xffff;
+               break;
+       }
+
+       return mode;
+}
+
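+/*
+ * Completion callback, invoked by acrypto once per finished request.
+ * Runs asynchronously with respect to the submitter, hence the atomic
+ * counter and the wait queue.
+ */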
+static void dm_callback(struct crypto_session_initializer *ci,
+               struct crypto_data *data)
+{
+       struct dm_async_private *priv = data->priv;
+
+       atomic_dec(&priv->cc->dm_async_pending);
+       wake_up(&priv->cc->dm_async_queue);
+
+       /* priv->sg points at priv itself - frees the whole allocation */
+       kfree(priv->sg);
+}
+
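+/*
+ * Queue one request with acrypto. Everything the request needs lives
+ * in a single allocation:
+ *
+ *   dm_async_private | sg: src, dst, key | key bytes | sg: iv | iv bytes
+ *
+ * (the iv parts exist only when an IV is used). Returns 0 if the
+ * request was queued; any error means the caller must encrypt or
+ * decrypt synchronously instead.
+ */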
+static inline int acrypto_process(struct crypt_config *cc,
+               struct scatterlist *out, struct scatterlist *in,
+               unsigned int len, u8 *iv, int iv_size, int write)
+{
+{
+       struct crypto_session *s;
+       struct crypto_session_initializer ci;
+       struct crypto_data data;
+       struct scatterlist *sg;
+       int sg_size, err = 0;
+       u8 *local_key;
+       struct dm_async_private *priv;
+
+       memset(&ci, 0, sizeof(ci));
+       memset(&data, 0, sizeof(data));
+
+       ci.operation = write ? CRYPTO_OP_ENCRYPT : CRYPTO_OP_DECRYPT;
+       ci.priority = 0;
+       ci.callback = &dm_callback;
+       ci.type = crypto_tfm_get_type(cc->tfm, cc->key_size);
+       ci.mode = crypto_tfm_get_mode(cc->tfm);
+       
+       if (ci.type == 0xffff || ci.mode == 0xffff) {
+               err = -EINVAL;
+               goto err_out_exit;
+       }
+
+       data.sg_src_num = data.sg_dst_num = data.sg_key_num = 1;
+       data.sg_iv_num = iv ? 1 : 0;
+
+       /*
+        * One allocation: private data, the scatterlists, and local
+        * copies of the key and IV (see the layout comment above).
+        */
+       sg_size = sizeof(struct dm_async_private) +
+                       sizeof(*sg) * (data.sg_src_num + data.sg_dst_num +
+                                      data.sg_key_num + data.sg_iv_num) +
+                       cc->key_size + iv_size;
+       priv = kmalloc(sg_size, GFP_KERNEL);
+       if (!priv) {
+               err = -ENOMEM;
+               goto err_out_exit;
+       }
+
+       memset(priv, 0, sg_size);
+
+       data.priv = priv;
+       priv->cc = cc;
+       priv->sg = priv;        /* freed in dm_callback() */
+
+       sg = (struct scatterlist *)(priv + 1);
+
+       data.sg_src = &sg[0];
+       data.sg_dst = &sg[1];
+       
+       data.sg_src[0].page = in->page;
+       data.sg_src[0].offset = in->offset;
+       data.sg_src[0].length = in->length;
+       
+       data.sg_dst[0].page = out->page;
+       data.sg_dst[0].offset = out->offset;
+       data.sg_dst[0].length = out->length;
+       
+       data.sg_key = &sg[2];
+       local_key = (u8 *)&sg[3];
+
+       memcpy(local_key, cc->key, cc->key_size);
+       
+       data.sg_key[0].page = virt_to_page(local_key);
+       data.sg_key[0].offset = offset_in_page(local_key);
+       data.sg_key[0].length = cc->key_size;
+
+       if (iv) {
+               u8 *local_iv;
+               
+               data.sg_iv = (struct scatterlist *)(local_key + cc->key_size);
+               local_iv = (u8 *)(local_key + cc->key_size + sizeof(*sg));
+               
+               data.sg_iv[0].page = virt_to_page(local_iv);
+               data.sg_iv[0].offset = offset_in_page(local_iv);
+               data.sg_iv[0].length = iv_size;
+
+               memcpy(local_iv, iv, iv_size);
+       }
+
+       s = crypto_session_alloc(&ci, &data);
+       if (!s) {
+               err = -ENOMEM;
+               goto err_out_free;
+       }
+
+       return 0;
+
+err_out_free:
+       kfree(priv);
+err_out_exit:
+       /* crypt_convert() pre-counted this request; undo that on failure */
+       atomic_dec(&cc->dm_async_pending);
+
+       return err;
+}
+#else
+static inline int acrypto_process(struct crypt_config *cc,
+               struct scatterlist *out, struct scatterlist *in,
+               unsigned int len, u8 *iv, int iv_size, int write)
+{
+       /*
+        * No async crypto available: return an error so the caller
+        * always takes the synchronous cipher path.
+        */
+       return -EOPNOTSUPP;
+}
+#endif
 
 static inline int
 crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
@@ -244,11 +429,19 @@ crypt_convert_scatterlist(struct crypt_c
                if (r < 0)
                        return r;
 
+               /* try the async path first; on error fall back to the
+                * synchronous cipher below */
+               r = acrypto_process(cc, out, in, length, iv, cc->iv_size, write);
+               if (r == 0)
+                       return r;
+
                if (write)
                        r = crypto_cipher_encrypt_iv(cc->tfm, out, in, length, iv);
                else
                        r = crypto_cipher_decrypt_iv(cc->tfm, out, in, length, iv);
        } else {
+               r = acrypto_process(cc, out, in, length, NULL, 0, write);
+               if (r == 0)
+                       return r;
+
                if (write)
                        r = crypto_cipher_encrypt(cc->tfm, out, in, length);
                else
@@ -281,6 +474,42 @@ static int crypt_convert(struct crypt_co
 {
        int r = 0;
 
+#if defined CONFIG_ACRYPTO || defined CONFIG_ACRYPTO_MODULE
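+       /*
+        * Pre-count the number of async requests this bio pair will
+        * generate (one per sector, mirroring the loop below), so that
+        * dm_async_pending is fully set up before the first callback
+        * can fire.
+        */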
+       {
+               unsigned int idx_in, idx_out, offset_in, offset_out;
+               int num = 0;
+
+               idx_in = ctx->idx_in;
+               idx_out = ctx->idx_out;
+               offset_in = ctx->offset_in;
+               offset_out = ctx->offset_out;
+
+               while (idx_in < ctx->bio_in->bi_vcnt &&
+                      idx_out < ctx->bio_out->bi_vcnt) {
+                       struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, idx_in);
+                       struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, idx_out);
+
+                       offset_in += 1 << SECTOR_SHIFT;
+                       offset_out += 1 << SECTOR_SHIFT;
+
+                       if (offset_in >= bv_in->bv_len) {
+                               offset_in = 0;
+                               idx_in++;
+                       }
+
+                       if (offset_out >= bv_out->bv_len) {
+                               offset_out = 0;
+                               idx_out++;
+                       }
+
+                       num++;
+               }
+
+               atomic_set(&cc->dm_async_pending, num);
+       }
+#endif
+
        while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
              ctx->idx_out < ctx->bio_out->bi_vcnt) {
                struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
@@ -470,6 +699,17 @@ static void kcryptd_do_work(void *data)
                           io->bio->bi_sector - io->target->begin, 0);
        r = crypt_convert(cc, &ctx);
 
+#if defined CONFIG_ACRYPTO || defined CONFIG_ACRYPTO_MODULE
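+       /*
+        * Wait for all async requests queued for this io to complete
+        * (failed submissions already decremented the counter).
+        */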
+       {
+               long timeout = 1000;
+               long tm;
+
+               tm = wait_event_timeout(cc->dm_async_queue,
+                               atomic_read(&cc->dm_async_pending) == 0,
+                               msecs_to_jiffies(timeout));
+               if (!tm)
+                       printk(KERN_ERR "%s: bug: work was not finished in %ld msecs, "
+                                       "%d requests remain.\n", __func__, timeout,
+                                       atomic_read(&cc->dm_async_pending));
+       }
+#endif
        dec_pending(io, r);
 }
 
@@ -680,6 +920,11 @@ static int crypt_ctr(struct dm_target *t
        } else
                cc->iv_mode = NULL;
 
+#if defined CONFIG_ACRYPTO || defined CONFIG_ACRYPTO_MODULE
+       init_waitqueue_head(&cc->dm_async_queue);
+       atomic_set(&cc->dm_async_pending, 0);
+#endif
+
        ti->private = cc;
        return 0;



-- 
        Evgeniy Polyakov