On Wed, Oct 05, 2011 at 01:33:33PM +0330, Hamid Nassiby wrote:
> 
> OK, I represented code as PSEUDO, just to simplify and concentrate problem's
> aspects ;),  (but it is also possible that I've concentrated it in a
> wrong way :D)
> This is my_cbc_encrypt code and functions it calls, bottom-up:
> 
> int write_request(u8 *buff, unsigned int count)
> {
> 
>       u32  tlp_size = 32;
>       struct my_dma_desc *desc_table = (struct my_dma_desc *)global_bar[0];
>       tlp_size = (count/128) | (tlp_size << 16);
>       memcpy(g_mydev->rdmaBuf_va, buff, count);
>       wmb();
> 
>       writel(cpu_to_le32(tlp_size),(&desc_table->wdmaperf));
>       wmb();
> 
> >       while((readl(&desc_table->ddmacr) | 0xFFFF0000)!= 0xFFFF0101);/*wait for
> >                                               transfer completion*/
>       return 0;
> }
> 
>  int my_transform(struct my_aes_op *op, int alg)
> {
> 
>               int  req_len, err;
>               unsigned long iflagsq, tflag;
>               u8 *req_buf = NULL, *res_buf = NULL;
>               alg_operation operation;
>               if (op->len == 0)
>                       return 0;
>               operation = !(op->dir);
> 
>               create_request(alg, op->mode, operation, 0, op->key,
>                         op->iv, op->src, op->len, &req_buf, &req_len); /*add
>                       header to original request and copy it to req_buf*/
> 
>               spin_lock_irqsave(&glock, tflag);
>               
> >               write_request(req_buf, req_len);/*now req_buf is sent to device
> >                               , device en/decrypts request and writes
> >                               the result to a fixed dma mapped address*/
>               if (err){
>                       printk(KERN_EMERG"Error WriteReuest:errcode=%d\n", err);
> >                       //handle exception (never occurred)
>               }
>               kfree(req_buf);
>               req_buf = NULL;
> 
> >               memcpy(op->dst, g_mydev->wdmaBuf_va, op->len);/*copy result from
> >                        fixed coherent dma mapped memory to destination*/
>               spin_unlock_irqrestore(&glock, tflag);
>               
>               return op->len;
> }
> 
> static int
> my_cbc_encrypt(struct blkcipher_desc *desc,
>                 struct scatterlist *dst, struct scatterlist *src,
>                 unsigned int nbytes)
> {
>       struct my_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
>       struct blkcipher_walk walk;
>       int err, ret;
>       unsigned long c2flag;
>       if (unlikely(op->keylen != AES_KEYSIZE_128))
>               return fallback_blk_enc(desc, dst, src, nbytes);
> 
> 
>       blkcipher_walk_init(&walk, dst, src, nbytes);
>       err = blkcipher_walk_virt(desc, &walk);
>       op->iv = walk.iv;
> 
>       while((nbytes = walk.nbytes)) {
> 
>               op->src = walk.src.virt.addr,
>               op->dst = walk.dst.virt.addr;
>               op->mode = AES_MODE_CBC;
>               op->len = nbytes /*- (nbytes % AES_MIN_BLOCK_SIZE)*/;
>               op->dir = AES_DIR_ENCRYPT;
>               ret = my_transform(op, 0);
>               nbytes -= ret;
>               err = blkcipher_walk_done(desc, &walk, nbytes);
>       }
> 
>       return err;
> }
> 

I can't tell much when looking at this code snippet. One guess would be
that someone (maybe you) has set the CRYPTO_TFM_REQ_MAY_SLEEP flag, as
blkcipher_walk_done calls crypto_yield() which in turn might call
schedule() if this flag is set. pcrypt removes this flag explicitly.

> 
> Did you turn BHs off, to prevent deadlocks  between your workqueues and
> network's softirqs?
> If there is any other thing that will help, I am pleased to hear.
> 

Basically, the bottom halves are off to keep up with the network softirqs.
They run with much higher priority and would interrupt the parallel
workers frequently.
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to