tree:   https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git master
head:   5c324a2ffa06f8b6fda59a77c6807acb9f45cfee
commit: 9744fec95f0674fbf67b12c42c3784dc299dc904 [45/63] crypto: inside-secure - remove request list to improve performance
config: powerpc-allmodconfig (attached as .config)
compiler: powerpc64-linux-gnu-gcc (Debian 7.2.0-11) 7.2.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        git checkout 9744fec95f0674fbf67b12c42c3784dc299dc904
        # save the attached .config to linux build tree
        GCC_VERSION=7.2.0 make.cross ARCH=powerpc 

Note: this may well be a false positive; FWIW, you are at least aware of it now.
http://gcc.gnu.org/wiki/Better_Uninitialized_Warnings
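
A note on why GCC flags this pattern: 'first_rdesc' is only assigned inside a
conditional within the result-descriptor loop and is read after the loop, and
the compiler cannot always prove on its own that the assigning iteration runs.
A minimal, hypothetical standalone sketch of that pattern (illustration only,
not the driver code):

int consume(int *p);

int pick_first(int *items, int n)
{
        int *first;     /* assigned only when the loop runs with i == 0 */
        int i;

        for (i = 0; i < n; i++) {
                if (i == 0)
                        first = &items[i];
        }

        /* if GCC cannot prove n >= 1, it reports 'first' as possibly
         * uninitialized here, even when every caller guarantees n >= 1 */
        return consume(first);
}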

All warnings (new ones prefixed by >>):

   drivers/crypto/inside-secure/safexcel_cipher.c: In function 'safexcel_send_req':
>> drivers/crypto/inside-secure/safexcel_cipher.c:475:2: warning: 'first_rdesc' may be used uninitialized in this function [-Wmaybe-uninitialized]
     safexcel_rdr_req_set(priv, ring, first_rdesc, base);
     ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

vim +/first_rdesc +475 drivers/crypto/inside-secure/safexcel_cipher.c

   373  
   374  static int safexcel_send_req(struct crypto_async_request *base, int ring,
   375                               struct safexcel_cipher_req *sreq,
   376                               struct scatterlist *src, struct scatterlist *dst,
   377                               unsigned int cryptlen, unsigned int assoclen,
   378                               unsigned int digestsize, u8 *iv, int *commands,
   379                               int *results)
   380  {
   381          struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
   382          struct safexcel_crypto_priv *priv = ctx->priv;
   383          struct safexcel_command_desc *cdesc;
   384          struct safexcel_result_desc *rdesc, *first_rdesc;
   385          struct scatterlist *sg;
   386          unsigned int totlen = cryptlen + assoclen;
   387          int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen;
   388          int i, ret = 0;
   389  
   390          if (src == dst) {
   391                  nr_src = dma_map_sg(priv->dev, src,
   392                                      sg_nents_for_len(src, totlen),
   393                                      DMA_BIDIRECTIONAL);
   394                  nr_dst = nr_src;
   395                  if (!nr_src)
   396                          return -EINVAL;
   397          } else {
   398                  nr_src = dma_map_sg(priv->dev, src,
   399                                      sg_nents_for_len(src, totlen),
   400                                      DMA_TO_DEVICE);
   401                  if (!nr_src)
   402                          return -EINVAL;
   403  
   404                  nr_dst = dma_map_sg(priv->dev, dst,
   405                                      sg_nents_for_len(dst, totlen),
   406                                      DMA_FROM_DEVICE);
   407                  if (!nr_dst) {
   408                          dma_unmap_sg(priv->dev, src,
   409                                       sg_nents_for_len(src, totlen),
   410                                       DMA_TO_DEVICE);
   411                          return -EINVAL;
   412                  }
   413          }
   414  
   415          memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
   416  
   417          if (ctx->aead) {
   418                  memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32),
   419                         ctx->ipad, ctx->state_sz);
   420                  memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) / sizeof(u32),
   421                         ctx->opad, ctx->state_sz);
   422          }
   423  
   424          /* command descriptors */
   425          for_each_sg(src, sg, nr_src, i) {
   426                  int len = sg_dma_len(sg);
   427  
   428                  /* Do not overflow the request */
   429                  if (queued - len < 0)
   430                          len = queued;
   431  
   432                  cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
   433                                             sg_dma_address(sg), len, totlen,
   434                                             ctx->base.ctxr_dma);
   435                  if (IS_ERR(cdesc)) {
   436                          /* No space left in the command descriptor ring */
   437                          ret = PTR_ERR(cdesc);
   438                          goto cdesc_rollback;
   439                  }
   440                  n_cdesc++;
   441  
   442                  if (n_cdesc == 1) {
   443                          safexcel_context_control(ctx, base, sreq, cdesc);
   444                          if (ctx->aead)
   445                                  safexcel_aead_token(ctx, iv, cdesc,
   446                                                      sreq->direction, cryptlen,
   447                                                      assoclen, digestsize);
   448                          else
   449                                  safexcel_skcipher_token(ctx, iv, cdesc,
   450                                                          cryptlen);
   451                  }
   452  
   453                  queued -= len;
   454                  if (!queued)
   455                          break;
   456          }
   457  
   458          /* result descriptors */
   459          for_each_sg(dst, sg, nr_dst, i) {
   460                  bool first = !i, last = (i == nr_dst - 1);
   461                  u32 len = sg_dma_len(sg);
   462  
   463                  rdesc = safexcel_add_rdesc(priv, ring, first, last,
   464                                             sg_dma_address(sg), len);
   465                  if (IS_ERR(rdesc)) {
   466                          /* No space left in the result descriptor ring */
   467                          ret = PTR_ERR(rdesc);
   468                          goto rdesc_rollback;
   469                  }
   470                  if (first)
   471                          first_rdesc = rdesc;
   472                  n_rdesc++;
   473          }
   474  
 > 475          safexcel_rdr_req_set(priv, ring, first_rdesc, base);
   476  
   477          *commands = n_cdesc;
   478          *results = n_rdesc;
   479          return 0;
   480  
   481  rdesc_rollback:
   482          for (i = 0; i < n_rdesc; i++)
   483                  safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
   484  cdesc_rollback:
   485          for (i = 0; i < n_cdesc; i++)
   486                  safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
   487  
   488          if (src == dst) {
   489                  dma_unmap_sg(priv->dev, src,
   490                               sg_nents_for_len(src, totlen),
   491                               DMA_BIDIRECTIONAL);
   492          } else {
   493                  dma_unmap_sg(priv->dev, src,
   494                               sg_nents_for_len(src, totlen),
   495                               DMA_TO_DEVICE);
   496                  dma_unmap_sg(priv->dev, dst,
   497                               sg_nents_for_len(dst, totlen),
   498                               DMA_FROM_DEVICE);
   499          }
   500  
   501          return ret;
   502  }
   503  
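
Looking at the listing above, this looks like a false positive: when the
destination mapping succeeds, nr_dst is at least 1, so the first iteration of
the result-descriptor loop (i == 0, hence 'first' is true) either assigns
first_rdesc or jumps to rdesc_rollback before line 475 is reached. If the
warning should be silenced anyway, one conventional option (a sketch only,
not a tested patch) is a NULL initializer at the declaration on line 384:

        /* initialize purely to quiet -Wmaybe-uninitialized */
        struct safexcel_result_desc *rdesc, *first_rdesc = NULL;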

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz
Description: application/gzip
