Hi Ilya,

I love your patch! Perhaps something to improve:

[auto build test WARNING on net-next/master]

url:    https://github.com/0day-ci/linux/commits/Ilya-Lesokhin/tls-Add-generic-NIC-offload-infrastructure/20171219-140819
reproduce:
        # apt-get install sparse
        make ARCH=x86_64 allmodconfig
        make C=1 CF=-D__CHECK_ENDIAN__
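
To re-check just this one file after the tree is built, sparse can be
forced to run again on it (C=2 re-checks even up-to-date objects), e.g.:

        make C=2 CF=-D__CHECK_ENDIAN__ net/tls/tls_device.o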


sparse warnings: (new ones prefixed by >>)


Please review and possibly fold the follow-up patch.

vim +649 net/tls/tls_device.c

   547  
   548  int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
   549  {
   550          struct tls_crypto_info *crypto_info;
   551          struct tls_offload_context *offload_ctx;
   552          struct tls_record_info *start_marker_record;
   553          u16 nonce_size, tag_size, iv_size, rec_seq_size;
   554          char *iv, *rec_seq;
   555          int rc;
   556          struct net_device *netdev;
   557          struct sk_buff *skb;
   558  
   559          if (!ctx) {
   560                  rc = -EINVAL;
   561                  goto out;
   562          }
   563  
   564          if (ctx->priv_ctx) {
   565                  rc = -EEXIST;
   566                  goto out;
   567          }
   568  
   569          /* We support starting offload on multiple sockets
   570           * concurrently, so we only need a read lock here.
   571           */
   572          percpu_down_read(&device_offload_lock);
   573          netdev = get_netdev_for_sock(sk);
   574          if (!netdev) {
   575                  pr_err("%s: netdev not found\n", __func__);
   576                  rc = -EINVAL;
   577                  goto release_lock;
   578          }
   579  
   580          if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
   581                  rc = -ENOTSUPP;
   582                  goto release_netdev;
   583          }
   584  
   585          /* Avoid offloading if the device is down.
   586           * We don't want to offload new flows after
   587           * the NETDEV_DOWN event.
   588           */
   589          if (!(netdev->flags & IFF_UP)) {
   590                  rc = -EINVAL;
   591                  goto release_netdev;
   592          }
   593  
   594          crypto_info = &ctx->crypto_send;
   595          switch (crypto_info->cipher_type) {
   596          case TLS_CIPHER_AES_GCM_128: {
   597                  nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
   598                  tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
   599                  iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
   600                  iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
   601                  rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
   602                  rec_seq =
   603                   ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
   604                  break;
   605          }
   606          default:
   607                  rc = -EINVAL;
   608                  goto release_netdev;
   609          }
   610  
   611          start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
   612          if (!start_marker_record) {
   613                  rc = -ENOMEM;
   614                  goto release_netdev;
   615          }
   616  
   617          rc = attach_sock_to_netdev(sk, netdev, ctx);
   618          if (rc)
   619                  goto free_marker_record;
   620  
   621          ctx->netdev = netdev;
   622  
   623          ctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
   624          ctx->tag_size = tag_size;
   625          ctx->iv_size = iv_size;
   626          ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
   627                            GFP_KERNEL);
   628          if (!ctx->iv) {
   629                  rc = -ENOMEM;
   630                  goto detach_sock;
   631          }
   632  
   633          memcpy(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
   634  
   635          ctx->rec_seq_size = rec_seq_size;
   636          ctx->rec_seq = kmalloc(rec_seq_size, GFP_KERNEL);
   637          if (!ctx->rec_seq) {
   638                  rc = -ENOMEM;
   639                  goto err_iv;
   640          }
   641          memcpy(ctx->rec_seq, rec_seq, rec_seq_size);
   642  
   643          offload_ctx = ctx->priv_ctx;
   644          memcpy(&offload_ctx->unacked_record_sn, rec_seq,
   645                 sizeof(offload_ctx->unacked_record_sn));
   646  
   647          /* start at rec_seq - 1 to account for the start marker record */
   648          offload_ctx->unacked_record_sn =
 > 649                  be64_to_cpu(offload_ctx->unacked_record_sn) - 1;
   650  
   651          rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
   652          if (rc)
   653                  goto err_iv;
   654  
   655          start_marker_record->end_seq = tcp_sk(sk)->write_seq;
   656          start_marker_record->len = 0;
   657          start_marker_record->num_frags = 0;
   658  
   659          INIT_LIST_HEAD(&offload_ctx->records_list);
   660          list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
   661          spin_lock_init(&offload_ctx->lock);
   662  
   663          inet_csk(sk)->icsk_clean_acked = &tls_icsk_clean_acked;
   664          ctx->push_pending_record = tls_device_push_pending_record;
   665          offload_ctx->sk_destruct = sk->sk_destruct;
   666  
   667          /* TLS offload is greatly simplified if we don't send
   668           * SKBs where only part of the payload needs to be encrypted.
   669           * So mark the last skb in the write queue as end of record.
   670           */
   671          skb = tcp_write_queue_tail(sk);
   672          if (skb)
   673                  TCP_SKB_CB(skb)->eor = 1;
   674  
   675          refcount_set(&ctx->refcount, 1);
   676          spin_lock_irq(&tls_device_lock);
   677          list_add_tail(&ctx->list, &tls_device_list);
   678          spin_unlock_irq(&tls_device_lock);
   679  
   680          /* Following this assignment, tls_is_sk_tx_device_offloaded()
   681           * will return true and the context might be accessed
   682           * by the netdev's xmit function.
   683           */
   684          smp_store_release(&sk->sk_destruct,
   685                            &tls_device_sk_destruct);
   686          goto release_lock;
   687  
   688  err_iv:
   689          kfree(ctx->iv);
   690  detach_sock:
   691          netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX);
   692  free_marker_record:
   693          kfree(start_marker_record);
   694  release_netdev:
   695          dev_put(netdev);
   696  release_lock:
   697          percpu_up_read(&device_offload_lock);
   698  out:
   699          return rc;
   700  }
   701  
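
A note on the flagged line: unacked_record_sn is a plain u64, but after the
memcpy() at line 644 it holds big-endian bytes, so the in-place conversion
at line 649 hands a host-order typed field to be64_to_cpu(), which is the
kind of implicit __be64 cast sparse warns about. Below is a minimal
userspace sketch of the same pattern (be64toh() from <endian.h> standing in
for the kernel's be64_to_cpu(); the 8-byte rec_seq value is made up for
illustration), with the conversion routed through an explicitly big-endian
temporary instead:

        #include <endian.h>   /* be64toh(); the kernel uses be64_to_cpu() */
        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                /* Made-up 8-byte record sequence as seen on the wire
                 * (big-endian): the next record number is 1.
                 */
                const uint8_t rec_seq[8] = { 0, 0, 0, 0, 0, 0, 0, 1 };
                uint64_t be_sn;  /* in the kernel: __be64, so sparse is happy */
                uint64_t unacked_record_sn;

                /* Copy the raw bytes into an explicitly big-endian temporary
                 * and convert once; converting a host-order field in place,
                 * as at vim +649, is what sparse flags.
                 */
                memcpy(&be_sn, rec_seq, sizeof(be_sn));

                /* Start at rec_seq - 1 so the zero-length start marker
                 * record owns sequence number 0.
                 */
                unacked_record_sn = be64toh(be_sn) - 1;

                printf("unacked_record_sn = %llu\n",
                       (unsigned long long)unacked_record_sn);
                return 0;
        }

In kernel terms the same effect could come from memcpy()ing into a local
__be64 before the subtraction, or from get_unaligned_be64() on the rec_seq
buffer; either keeps unacked_record_sn host-order for its whole lifetime.
This is only a sketch of why the warning fires, not necessarily the fix the
authors will want to fold in.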

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
