On 09-03-26, 06:33, Xianwei Zhao via B4 Relay wrote:
> From: Xianwei Zhao <[email protected]>
> +static dma_cookie_t aml_dma_tx_submit(struct dma_async_tx_descriptor *tx)
> +{
> + return dma_cookie_assign(tx);
> +}
You lost tx here — why was it not saved into a queue?
> +static struct dma_async_tx_descriptor *aml_dma_prep_slave_sg
> + (struct dma_chan *chan, struct scatterlist *sgl,
> + unsigned int sg_len, enum dma_transfer_direction direction,
> + unsigned long flags, void *context)
> +{
> + struct aml_dma_chan *aml_chan = to_aml_dma_chan(chan);
> + struct aml_dma_dev *aml_dma = aml_chan->aml_dma;
> + struct aml_dma_sg_link *sg_link;
> + struct scatterlist *sg;
> + int idx = 0;
> + u64 paddr;
> + u32 reg, link_count, avail, chan_id;
> + u32 i;
> +
> + if (aml_chan->direction != direction) {
> + dev_err(aml_dma->dma_device.dev, "direction not support\n");
> + return NULL;
> + }
> +
> + switch (aml_chan->status) {
> + case DMA_IN_PROGRESS:
> + dev_err(aml_dma->dma_device.dev, "not support multi
> tx_desciptor\n");
> + return NULL;
And why is that? You should be preparing a descriptor, keeping it ready, and
submitting it after the current one finishes.
> +
> + case DMA_COMPLETE:
> + aml_chan->data_len = 0;
> + chan_id = aml_chan->chan_id;
> + reg = (direction == DMA_DEV_TO_MEM) ? WCH_INT_MASK :
> RCH_INT_MASK;
> + regmap_set_bits(aml_dma->regmap, reg, BIT(chan_id));
> +
> + break;
> + default:
> + dev_err(aml_dma->dma_device.dev, "status error\n");
> + return NULL;
> + }
> +
> + link_count = sg_nents_for_dma(sgl, sg_len, SG_MAX_LEN);
> +
> + if (link_count > DMA_MAX_LINK) {
> + dev_err(aml_dma->dma_device.dev,
> + "maximum number of sg exceeded: %d > %d\n",
> + sg_len, DMA_MAX_LINK);
> + aml_chan->status = DMA_ERROR;
> + return NULL;
> + }
> +
> + aml_chan->status = DMA_IN_PROGRESS;
> +
> + for_each_sg(sgl, sg, sg_len, i) {
> + avail = sg_dma_len(sg);
> + paddr = sg->dma_address;
> + while (avail > SG_MAX_LEN) {
> + sg_link = &aml_chan->sg_link[idx++];
> + /* set dma address and len to sglink*/
> + sg_link->address = paddr;
> + sg_link->ctl = FIELD_PREP(LINK_LEN, SG_MAX_LEN);
> + paddr = paddr + SG_MAX_LEN;
> + avail = avail - SG_MAX_LEN;
> + }
> + sg_link = &aml_chan->sg_link[idx++];
> + /* set dma address and len to sglink*/
> + sg_link->address = paddr;
> + sg_link->ctl = FIELD_PREP(LINK_LEN, avail);
> +
> + aml_chan->data_len += sg_dma_len(sg);
> + }
> + aml_chan->sg_link_cnt = idx;
There is no descriptor management here. You are writing directly to the
channel. This is _very_ inefficient and defeats the purpose of dmaengine.
Please revise the driver: implement queues to manage multiple transactions —
we have the vchan (virt-dma) helpers to help you implement these, so make use of them.
--
~Vinod