Hi Alexander,

Thank you for the patch! There is still something to improve:

[auto build test ERROR on net-next/master]

url:    https://github.com/0day-ci/linux/commits/Alexander-Lobakin/qed-qede-improve-chain-API-and-add-XDP_REDIRECT-support/20200723-000000
base:   https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git fa56a987449bcf4c1cb68369a187af3515b85c78
config: alpha-allmodconfig (attached as .config)
compiler: alpha-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=alpha

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <l...@intel.com>

All errors/warnings (new ones prefixed by >>):

   drivers/net/ethernet/qlogic/qed/qed_chain.c: In function 'qed_chain_free_pbl':
>> drivers/net/ethernet/qlogic/qed/qed_chain.c:70:2: error: implicit declaration of function 'vfree'; did you mean 'kvfree'? [-Werror=implicit-function-declaration]
      70 |  vfree(chain->pbl.pp_addr_tbl);
         |  ^~~~~
         |  kvfree
   drivers/net/ethernet/qlogic/qed/qed_chain.c: In function 'qed_chain_alloc_pbl':
>> drivers/net/ethernet/qlogic/qed/qed_chain.c:200:13: error: implicit declaration of function 'vzalloc'; did you mean 'kvzalloc'? [-Werror=implicit-function-declaration]
     200 |  addr_tbl = vzalloc(size);
         |             ^~~~~~~
         |             kvzalloc
>> drivers/net/ethernet/qlogic/qed/qed_chain.c:200:11: warning: assignment to 'struct addr_tbl_entry *' from 'int' makes pointer from integer without a cast [-Wint-conversion]
     200 |  addr_tbl = vzalloc(size);
         |           ^
   cc1: some warnings being treated as errors
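
The root cause looks like a missing header: vfree() and vzalloc() are declared in <linux/vmalloc.h>, and on this alpha allmodconfig build nothing appears to pull that header in indirectly, so both calls become implicit declarations (the int-to-pointer warning then follows from the implicit int return type). A minimal, untested sketch of one possible fix is to include the header explicitly in qed_chain.c; the hunk below is illustrative only, with no context lines:

    --- a/drivers/net/ethernet/qlogic/qed/qed_chain.c
    +++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c
    @@
    +#include <linux/vmalloc.h>	/* declares vzalloc() and vfree() */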

vim +70 drivers/net/ethernet/qlogic/qed/qed_chain.c

    45  
    46  static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain)
    47  {
    48          struct device *dev = &cdev->pdev->dev;
    49          struct addr_tbl_entry *entry;
    50          u32 pbl_size, i;
    51  
    52          if (!chain->pbl.pp_addr_tbl)
    53                  return;
    54  
    55          for (i = 0; i < chain->page_cnt; i++) {
    56                  entry = chain->pbl.pp_addr_tbl + i;
    57                  if (!entry->virt_addr)
    58                          break;
    59  
    60                  dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, entry->virt_addr,
    61                                    entry->dma_map);
    62          }
    63  
    64          pbl_size = chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
    65  
    66          if (!chain->b_external_pbl)
    67                  dma_free_coherent(dev, pbl_size, chain->pbl_sp.p_virt_table,
    68                                    chain->pbl_sp.p_phys_table);
    69  
  > 70          vfree(chain->pbl.pp_addr_tbl);
    71          chain->pbl.pp_addr_tbl = NULL;
    72  }
    73  
    74  /**
    75   * qed_chain_free() - Free chain DMA memory.
    76   *
    77   * @cdev: Main device structure.
    78   * @chain: Chain to free.
    79   */
    80  void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain)
    81  {
    82          switch (chain->mode) {
    83          case QED_CHAIN_MODE_NEXT_PTR:
    84                  qed_chain_free_next_ptr(cdev, chain);
    85                  break;
    86          case QED_CHAIN_MODE_SINGLE:
    87                  qed_chain_free_single(cdev, chain);
    88                  break;
    89          case QED_CHAIN_MODE_PBL:
    90                  qed_chain_free_pbl(cdev, chain);
    91                  break;
    92          default:
    93                  break;
    94          }
    95  }
    96  
    97  static int
    98  qed_chain_alloc_sanity_check(struct qed_dev *cdev,
    99                               enum qed_chain_cnt_type cnt_type,
   100                               size_t elem_size, u32 page_cnt)
   101  {
   102          u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
   103  
   104          /* The actual chain size can be larger than the maximal possible value
   105           * after rounding up the requested elements number to pages, and after
   106           * taking into account the unusuable elements (next-ptr elements).
   107           * The size of a "u16" chain can be (U16_MAX + 1) since the chain
   108           * size/capacity fields are of u32 type.
   109           */
   110          switch (cnt_type) {
   111          case QED_CHAIN_CNT_TYPE_U16:
   112                  if (chain_size > U16_MAX + 1)
   113                          break;
   114  
   115                  return 0;
   116          case QED_CHAIN_CNT_TYPE_U32:
   117                  if (chain_size > U32_MAX)
   118                          break;
   119  
   120                  return 0;
   121          default:
   122                  return -EINVAL;
   123          }
   124  
   125          DP_NOTICE(cdev,
   126                    "The actual chain size (0x%llx) is larger than the 
maximal possible value\n",
   127                    chain_size);
   128  
   129          return -EINVAL;
   130  }
   131  
   132  static int qed_chain_alloc_next_ptr(struct qed_dev *cdev,
   133                                      struct qed_chain *chain)
   134  {
   135          struct device *dev = &cdev->pdev->dev;
   136          void *virt, *virt_prev = NULL;
   137          dma_addr_t phys;
   138          u32 i;
   139  
   140          for (i = 0; i < chain->page_cnt; i++) {
   141                  virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
   142                                            GFP_KERNEL);
   143                  if (!virt)
   144                          return -ENOMEM;
   145  
   146                  if (i == 0) {
   147                          qed_chain_init_mem(chain, virt, phys);
   148                          qed_chain_reset(chain);
   149                  } else {
   150                          qed_chain_init_next_ptr_elem(chain, virt_prev, virt,
   151                                                       phys);
   152                  }
   153  
   154                  virt_prev = virt;
   155          }
   156  
   157          /* Last page's next element should point to the beginning of the
   158           * chain.
   159           */
   160          qed_chain_init_next_ptr_elem(chain, virt_prev, chain->p_virt_addr,
   161                                       chain->p_phys_addr);
   162  
   163          return 0;
   164  }
   165  
   166  static int qed_chain_alloc_single(struct qed_dev *cdev,
   167                                    struct qed_chain *chain)
   168  {
   169          dma_addr_t phys;
   170          void *virt;
   171  
   172          virt = dma_alloc_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE,
   173                                    &phys, GFP_KERNEL);
   174          if (!virt)
   175                  return -ENOMEM;
   176  
   177          qed_chain_init_mem(chain, virt, phys);
   178          qed_chain_reset(chain);
   179  
   180          return 0;
   181  }
   182  
   183  static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain,
   184                                 struct qed_chain_ext_pbl *ext_pbl)
   185  {
   186          struct device *dev = &cdev->pdev->dev;
   187          struct addr_tbl_entry *addr_tbl;
   188          dma_addr_t phys, pbl_phys;
   189          void *pbl_virt;
   190          u32 page_cnt, i;
   191          size_t size;
   192          void *virt;
   193  
   194          page_cnt = chain->page_cnt;
   195  
   196          size = array_size(page_cnt, sizeof(*addr_tbl));
   197          if (unlikely(size == SIZE_MAX))
   198                  return -EOVERFLOW;
   199  
 > 200          addr_tbl = vzalloc(size);
   201          if (!addr_tbl)
   202                  return -ENOMEM;
   203  
   204          chain->pbl.pp_addr_tbl = addr_tbl;
   205  
   206          if (ext_pbl) {
   207                  size = 0;
   208                  pbl_virt = ext_pbl->p_pbl_virt;
   209                  pbl_phys = ext_pbl->p_pbl_phys;
   210  
   211                  chain->b_external_pbl = true;
   212          } else {
   213                  size = array_size(page_cnt, QED_CHAIN_PBL_ENTRY_SIZE);
   214                  if (unlikely(size == SIZE_MAX))
   215                          return -EOVERFLOW;
   216  
   217                  pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys,
   218                                                GFP_KERNEL);
   219          }
   220  
   221          if (!pbl_virt)
   222                  return -ENOMEM;
   223  
   224          chain->pbl_sp.p_virt_table = pbl_virt;
   225          chain->pbl_sp.p_phys_table = pbl_phys;
   226  
   227          for (i = 0; i < page_cnt; i++) {
   228                  virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
   229                                            GFP_KERNEL);
   230                  if (!virt)
   231                          return -ENOMEM;
   232  
   233                  if (i == 0) {
   234                          qed_chain_init_mem(chain, virt, phys);
   235                          qed_chain_reset(chain);
   236                  }
   237  
   238                  /* Fill the PBL table with the physical address of the page */
   239                  *(dma_addr_t *)pbl_virt = phys;
   240                  pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
   241  
   242                  /* Keep the virtual address of the page */
   243                  addr_tbl[i].virt_addr = virt;
   244                  addr_tbl[i].dma_map = phys;
   245          }
   246  
   247          return 0;
   248  }
   249  
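
An alternative, equally untested sketch: follow the compiler's own hints and switch the page-address table to the kv*() helpers, whose declarations appear to be visible already (GCC draws its suggestions from names it can see in the translation unit). Illustrative hunks only, covering just the two flagged call sites:

    --- a/drivers/net/ethernet/qlogic/qed/qed_chain.c
    +++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c
    @@ qed_chain_free_pbl
    -	vfree(chain->pbl.pp_addr_tbl);
    +	kvfree(chain->pbl.pp_addr_tbl);		/* kvfree() releases kmalloc'ed or vmalloc'ed memory */
    @@ qed_chain_alloc_pbl
    -	addr_tbl = vzalloc(size);
    +	addr_tbl = kvzalloc(size, GFP_KERNEL);	/* falls back to vmalloc for large tables */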

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-...@lists.01.org

Attachment: .config.gz