Hi Tyler,

There are still some issues with indentation where the sed rewrite causes a statement to be split across 2 lines. Please fix the indentation; NACK as is.
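For example, something like this is what I would expect for the calls that no longer fit on one line (a sketch only, reusing desc/atom_desc from the patch context and assuming the usual DPDK style of indenting continuation lines with extra tabs):

	/* Old builtin fits on a single line. */
	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);

	/* New call wraps, so the continuation gets its own deeper indent
	 * rather than being left at the column where the line happened to split.
	 */
	atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
			rte_memory_order_relaxed);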
Thanks
Nic

> -----Original Message-----
> From: Tyler Retzlaff <[email protected]>
> Sent: Monday, May 6, 2024 10:58 AM
> To: [email protected]
> Cc: Mattias Rönnblom <[email protected]>; Morten Brørup
> <[email protected]>; Sevincer, Abdullah
> <[email protected]>; Ajit Khaparde
> <[email protected]>; Alok Prasad <[email protected]>;
> Burakov, Anatoly <[email protected]>; Andrew Rybchenko
> <[email protected]>; Anoob Joseph <[email protected]>;
> Richardson, Bruce <[email protected]>; Marohn, Byron
> <[email protected]>; Chenbo Xia <[email protected]>;
> Chengwen Feng <[email protected]>; Loftus, Ciara
> <[email protected]>; Power, Ciara <[email protected]>; Dariusz
> Sosnowski <[email protected]>; Hunt, David <[email protected]>;
> Devendra Singh Rawat <[email protected]>; Carrillo, Erik G
> <[email protected]>; Guoyang Zhou <[email protected]>;
> Harman Kalra <[email protected]>; Van Haaren, Harry
> <[email protected]>; Nagarahalli, Honnappa
> <[email protected]>; Jakub Grajciar <[email protected]>;
> Jerin Jacob <[email protected]>; Jeroen de Borst <[email protected]>;
> Jian Wang <[email protected]>; Jiawen Wu
> <[email protected]>; Jie Hai <[email protected]>; Wu, Jingjing
> <[email protected]>; Joshua Washington <[email protected]>;
> Joyce Kong <[email protected]>; Guo, Junfeng <[email protected]>;
> Laatz, Kevin <[email protected]>; Konstantin Ananyev
> <[email protected]>; Liang Ma <[email protected]>;
> Long Li <[email protected]>; Maciej Czekaj <[email protected]>;
> Matan Azrad <[email protected]>; Maxime Coquelin
> <[email protected]>; Chautru, Nicolas
> <[email protected]>; Ori Kam <[email protected]>; Pavan Nikhilesh
> <[email protected]>; Mccarthy, Peter
> <[email protected]>; Rahul Lakkireddy
> <[email protected]>; Pattan, Reshma
> <[email protected]>; Xu, Rosen <[email protected]>; Ruifeng
> Wang <[email protected]>; Rushil Gupta <[email protected]>;
> Gobriel, Sameh <[email protected]>; Sivaprasad Tummala
> <[email protected]>; Somnath Kotur
> <[email protected]>; Stephen Hemminger
> <[email protected]>; Suanming Mou
> <[email protected]>; Sunil Kumar Kori <[email protected]>; Sunil
> Uttarwar <[email protected]>; Tetsuya Mukawa
> <[email protected]>; Vamsi Attunuru <[email protected]>;
> Viacheslav Ovsiienko <[email protected]>; Medvedkin, Vladimir
> <[email protected]>; Xiaoyun Wang
> <[email protected]>; Wang, Yipeng1
> <[email protected]>; Yisen Zhuang <[email protected]>;
> Ziyang Xuan <[email protected]>; Tyler Retzlaff
> <[email protected]>
> Subject: [PATCH v5 31/45] baseband/acc: use rte stdatomic API
>
> Replace the use of gcc builtin __atomic_xxx intrinsics with corresponding
> rte_atomic_xxx optional rte stdatomic API.
>
> Signed-off-by: Tyler Retzlaff <[email protected]>
> Acked-by: Stephen Hemminger <[email protected]>
> ---
>  drivers/baseband/acc/rte_acc100_pmd.c | 36 +++++++++++++--------------
>  drivers/baseband/acc/rte_vrb_pmd.c    | 46 +++++++++++++++++++++++------------
>  2 files changed, 48 insertions(+), 34 deletions(-)
>
> diff --git a/drivers/baseband/acc/rte_acc100_pmd.c b/drivers/baseband/acc/rte_acc100_pmd.c
> index 4f666e5..ee50b9c 100644
> --- a/drivers/baseband/acc/rte_acc100_pmd.c
> +++ b/drivers/baseband/acc/rte_acc100_pmd.c
> @@ -3673,8 +3673,8 @@
>
>  	desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
>  	desc = q->ring_addr + desc_idx;
> -	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> -			__ATOMIC_RELAXED);
> +	atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
> +			rte_memory_order_relaxed);
>
>  	/* Check fdone bit */
>  	if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3728,8 +3728,8 @@
>  	uint16_t current_dequeued_descs = 0, descs_in_tb;
>
>  	desc = acc_desc_tail(q, *dequeued_descs);
> -	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> -			__ATOMIC_RELAXED);
> +	atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
> +			rte_memory_order_relaxed);
>
>  	/* Check fdone bit */
>  	if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3742,8 +3742,8 @@
>  	/* Check if last CB in TB is ready to dequeue (and thus
>  	 * the whole TB) - checking sdone bit. If not return.
>  	 */
> -	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
> -			__ATOMIC_RELAXED);
> +	atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
> +			rte_memory_order_relaxed);
>  	if (!(atom_desc.rsp.val & ACC_SDONE))
>  		return -1;
>
> @@ -3755,8 +3755,8 @@
>
>  	while (i < descs_in_tb) {
>  		desc = acc_desc_tail(q, *dequeued_descs);
> -		atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> -				__ATOMIC_RELAXED);
> +		atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
> +				rte_memory_order_relaxed);
>  		rsp.val = atom_desc.rsp.val;
>  		rte_bbdev_log_debug("Resp. desc %p: %x descs %d cbs %d\n",
>  				desc, rsp.val, descs_in_tb, desc->req.numCBs);
> @@ -3793,8 +3793,8 @@
>  	struct rte_bbdev_dec_op *op;
>
>  	desc = acc_desc_tail(q, dequeued_cbs);
> -	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> -			__ATOMIC_RELAXED);
> +	atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
> +			rte_memory_order_relaxed);
>
>  	/* Check fdone bit */
>  	if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3846,8 +3846,8 @@
>  	struct rte_bbdev_dec_op *op;
>
>  	desc = acc_desc_tail(q, dequeued_cbs);
> -	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> -			__ATOMIC_RELAXED);
> +	atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
> +			rte_memory_order_relaxed);
>
>  	/* Check fdone bit */
>  	if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3902,8 +3902,8 @@
>  	uint8_t cbs_in_tb = 1, cb_idx = 0;
>
>  	desc = acc_desc_tail(q, dequeued_cbs);
> -	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> -			__ATOMIC_RELAXED);
> +	atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
> +			rte_memory_order_relaxed);
>
>  	/* Check fdone bit */
>  	if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3919,8 +3919,8 @@
>  	/* Check if last CB in TB is ready to dequeue (and thus
>  	 * the whole TB) - checking sdone bit. If not return.
>  	 */
> -	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
> -			__ATOMIC_RELAXED);
> +	atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
> +			rte_memory_order_relaxed);
>  	if (!(atom_desc.rsp.val & ACC_SDONE))
>  		return -1;
>
> @@ -3930,8 +3930,8 @@
>  	/* Read remaining CBs if exists */
>  	while (cb_idx < cbs_in_tb) {
>  		desc = acc_desc_tail(q, dequeued_cbs);
> -		atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> -				__ATOMIC_RELAXED);
> +		atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
> +				rte_memory_order_relaxed);
>  		rsp.val = atom_desc.rsp.val;
>  		rte_bbdev_log_debug("Resp. desc %p: %x r %d c %d\n",
>  				desc, rsp.val, cb_idx, cbs_in_tb);
> diff --git a/drivers/baseband/acc/rte_vrb_pmd.c b/drivers/baseband/acc/rte_vrb_pmd.c
> index 88b1104..f7c54be 100644
> --- a/drivers/baseband/acc/rte_vrb_pmd.c
> +++ b/drivers/baseband/acc/rte_vrb_pmd.c
> @@ -3119,7 +3119,8 @@
>
>  	desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
>  	desc = q->ring_addr + desc_idx;
> -	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
> +	atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
> +			rte_memory_order_relaxed);
>
>  	if (*dequeued_ops + desc->req.numCBs > max_requested_ops)
>  		return -1;
> @@ -3157,7 +3158,8 @@
>  	struct rte_bbdev_enc_op *op;
>
>  	desc = acc_desc_tail(q, *dequeued_descs);
> -	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
> +	atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
> +			rte_memory_order_relaxed);
>
>  	/* Check fdone bit. */
>  	if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3192,7 +3194,8 @@
>  	uint16_t current_dequeued_descs = 0, descs_in_tb;
>
>  	desc = acc_desc_tail(q, *dequeued_descs);
> -	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
> +	atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
> +			rte_memory_order_relaxed);
>
>  	if (*dequeued_ops + 1 > max_requested_ops)
>  		return -1;
> @@ -3208,7 +3211,8 @@
>  	/* Check if last CB in TB is ready to dequeue (and thus
>  	 * the whole TB) - checking sdone bit. If not return.
>  	 */
> -	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
> +	atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
> +			rte_memory_order_relaxed);
>  	if (!(atom_desc.rsp.val & ACC_SDONE))
>  		return -1;
>
> @@ -3220,7 +3224,8 @@
>
>  	while (i < descs_in_tb) {
>  		desc = acc_desc_tail(q, *dequeued_descs);
> -		atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
> +		atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
> +				rte_memory_order_relaxed);
>  		rsp.val = atom_desc.rsp.val;
>
>  		vrb_update_dequeued_operation(desc, rsp, &op->status, aq_dequeued, true, false);
> @@ -3246,7 +3251,8 @@
>  	struct rte_bbdev_dec_op *op;
>
>  	desc = acc_desc_tail(q, dequeued_cbs);
> -	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
> +	atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
> +			rte_memory_order_relaxed);
>
>  	/* Check fdone bit. */
>  	if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3290,7 +3296,8 @@
>  	struct rte_bbdev_dec_op *op;
>
>  	desc = acc_desc_tail(q, dequeued_cbs);
> -	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
> +	atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
> +			rte_memory_order_relaxed);
>
>  	/* Check fdone bit.
>  	 */
>  	if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3346,7 +3353,8 @@
>  	uint32_t tb_crc_check = 0;
>
>  	desc = acc_desc_tail(q, dequeued_cbs);
> -	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
> +	atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
> +			rte_memory_order_relaxed);
>
>  	/* Check fdone bit. */
>  	if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3362,7 +3370,8 @@
>  	/* Check if last CB in TB is ready to dequeue (and thus the whole TB) - checking sdone bit.
>  	 * If not return.
>  	 */
> -	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
> +	atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
> +			rte_memory_order_relaxed);
>  	if (!(atom_desc.rsp.val & ACC_SDONE))
>  		return -1;
>
> @@ -3372,7 +3381,8 @@
>  	/* Read remaining CBs if exists. */
>  	while (cb_idx < cbs_in_tb) {
>  		desc = acc_desc_tail(q, dequeued_cbs);
> -		atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
> +		atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
> +				rte_memory_order_relaxed);
>  		rsp.val = atom_desc.rsp.val;
>  		rte_bbdev_log_debug("Resp. desc %p: %x %x %x", desc,
>  				rsp.val, desc->rsp.add_info_0,
> @@ -3790,7 +3800,8 @@
>  	struct rte_bbdev_fft_op *op;
>
>  	desc = acc_desc_tail(q, dequeued_cbs);
> -	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
> +	atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
> +			rte_memory_order_relaxed);
>
>  	/* Check fdone bit */
>  	if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -4116,7 +4127,8 @@
>  	uint8_t descs_in_op, i;
>
>  	desc = acc_desc_tail(q, dequeued_ops);
> -	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
> +	atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
> +			rte_memory_order_relaxed);
>
>  	/* Check fdone bit. */
>  	if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -4127,7 +4139,8 @@
>  		/* Get last CB. */
>  		last_desc = acc_desc_tail(q, dequeued_ops + descs_in_op - 1);
>  		/* Check if last op is ready to dequeue by checking fdone bit. If not exit. */
> -		atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
> +		atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
> +				rte_memory_order_relaxed);
>  		if (!(atom_desc.rsp.val & ACC_FDONE))
>  			return -1;
>  #ifdef RTE_LIBRTE_BBDEV_DEBUG
> @@ -4137,8 +4150,8 @@
>  		for (i = 1; i < descs_in_op - 1; i++) {
>  			last_desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i)
>  					& q->sw_ring_wrap_mask);
> -			atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
> -					__ATOMIC_RELAXED);
> +			atom_desc.atom_hdr = rte_atomic_load_explicit(
> +					(uint64_t __rte_atomic *)last_desc, rte_memory_order_relaxed);
>  			if (!(atom_desc.rsp.val & ACC_FDONE))
>  				return -1;
>  		}
> @@ -4154,7 +4167,8 @@
>
>  	for (i = 0; i < descs_in_op; i++) {
>  		desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i) & q->sw_ring_wrap_mask);
> -		atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
> +		atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
> +				rte_memory_order_relaxed);
>  		rsp.val = atom_desc.rsp.val;
>
>  		vrb_update_dequeued_operation(desc, rsp, &op->status, aq_dequeued, true, false);
> --
> 1.8.3.1

