[PATCH v4 3/4] dmaengine: sf-pdma: add mpfs-pdma compatible name

Shravan.Chippa at microchip.com
Tue Nov 28 03:29:12 PST 2023


Hi Vinod,

> -----Original Message-----
> From: Vinod Koul <vkoul at kernel.org>
> Sent: Friday, November 24, 2023 5:42 PM
> To: shravan Chippa - I35088 <Shravan.Chippa at microchip.com>
> Cc: green.wan at sifive.com; robh+dt at kernel.org;
> krzysztof.kozlowski+dt at linaro.org; palmer at dabbelt.com;
> paul.walmsley at sifive.com; conor+dt at kernel.org;
> dmaengine at vger.kernel.org; devicetree at vger.kernel.org;
> linux-riscv at lists.infradead.org; linux-kernel at vger.kernel.org;
> Nagasuresh Relli - I67208 <Nagasuresh.Relli at microchip.com>;
> Praveen Kumar - I30718 <Praveen.Kumar at microchip.com>;
> Emil Renner Berthing <emil.renner.berthing at canonical.com>
> Subject: Re: [PATCH v4 3/4] dmaengine: sf-pdma: add mpfs-pdma compatible name
> 
> On 31-10-23, 10:57, shravan chippa wrote:
> > From: Shravan Chippa <shravan.chippa at microchip.com>
> >
> > SiFive platform DMA does not allow out-of-order transfers. Add a
> > PolarFire SoC specific compatible and code to support out-of-order
> > DMA transfers.
> 
> By default dma xtions are not supposed to be out of order, so why does it
> make sense specifying that here?

All DMA transfers are normally in-order; however, the sf-pdma IP has a programmable
configuration, so the transfer type can be selected as either in-order or out-of-order.
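To make that concrete, here is a minimal sketch (illustrative only, using the
defines from the patch below; the helper names are made up, not part of the
series) of how the two configurations are encoded in the transfer-type word:

    #include <linux/bits.h>
    #include <linux/types.h>

    #define PDMA_FULL_SPEED		0xFF000000
    #define PDMA_STRICT_ORDERING	BIT(3)

    /* in-order: matches the old hard-coded 0xFF000008 value */
    static inline u32 pdma_xfer_type_strict(void)
    {
    	return PDMA_FULL_SPEED | PDMA_STRICT_ORDERING;
    }

    /* out-of-order: selected via the "microchip,mpfs-pdma" quirk */
    static inline u32 pdma_xfer_type_out_of_order(void)
    {
    	return PDMA_FULL_SPEED;
    }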
 
The sf-pdma IP only supports mem-to-mem transfers.
 
On the PolarFire SoC platform we get better throughput when the sf-pdma IP is configured
for out-of-order DMA transfers instead of the in-order configuration (roughly 1.9x in the test below).
 
test results for in-order:
- moved 16 MB from 0x89000000 using pdmacpy to 0x88000000 (chan: 0) in 0.068962 secs (232.012 MB per sec)
 
test results for out-of-order:
- moved 16 MB from 0x89000000 using pdmacpy to 0x88000000 (chan: 0) in 0.037020 secs (432.199 MB per sec)
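(The "pdmacpy" test referenced above is not part of this series. For reference
only, a timed mem-to-mem copy of this kind could be reproduced with the generic
dmaengine client API, roughly as sketched below; the function name and error
handling are simplified assumptions, not taken from the actual test tool.)

    #include <linux/dmaengine.h>
    #include <linux/errno.h>
    #include <linux/ktime.h>
    #include <linux/printk.h>

    /* Time one DMA memcpy on an already-requested memcpy-capable channel. */
    static int pdma_timed_memcpy(struct dma_chan *chan,
    			     dma_addr_t dst, dma_addr_t src, size_t len)
    {
    	struct dma_async_tx_descriptor *tx;
    	dma_cookie_t cookie;
    	ktime_t start;

    	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
    	if (!tx)
    		return -ENOMEM;

    	start = ktime_get();
    	cookie = dmaengine_submit(tx);
    	dma_async_issue_pending(chan);

    	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
    		return -ETIMEDOUT;

    	pr_info("moved %zu bytes in %lld us\n",
    		len, ktime_us_delta(ktime_get(), start));
    	return 0;
    }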

Thanks,
Shravan

> 
> >
> > Reviewed-by: Emil Renner Berthing <emil.renner.berthing at canonical.com>
> > Signed-off-by: Shravan Chippa <shravan.chippa at microchip.com>
> > ---
> >  drivers/dma/sf-pdma/sf-pdma.c | 27 ++++++++++++++++++++++++---
> >  drivers/dma/sf-pdma/sf-pdma.h |  8 +++++++-
> >  2 files changed, 31 insertions(+), 4 deletions(-)
> >
> > diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
> > index 4c456bdef882..82ab12c40743 100644
> > --- a/drivers/dma/sf-pdma/sf-pdma.c
> > +++ b/drivers/dma/sf-pdma/sf-pdma.c
> > @@ -25,6 +25,8 @@
> >
> >  #include "sf-pdma.h"
> >
> > +#define PDMA_QUIRK_NO_STRICT_ORDERING   BIT(0)
> > +
> >  #ifndef readq
> >  static inline unsigned long long readq(void __iomem *addr)
> >  {
> > @@ -66,7 +68,7 @@ static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
> >  static void sf_pdma_fill_desc(struct sf_pdma_desc *desc,
> >                             u64 dst, u64 src, u64 size)
> >  {
> > -     desc->xfer_type = PDMA_FULL_SPEED;
> > +     desc->xfer_type =  desc->chan->pdma->transfer_type;
> >       desc->xfer_size = size;
> >       desc->dst_addr = dst;
> >       desc->src_addr = src;
> > @@ -520,6 +522,7 @@ static struct dma_chan *sf_pdma_of_xlate(struct of_phandle_args *dma_spec,
> >
> >  static int sf_pdma_probe(struct platform_device *pdev)
> >  {
> > +     const struct sf_pdma_driver_platdata *ddata;
> >       struct sf_pdma *pdma;
> >       int ret, n_chans;
> >       const enum dma_slave_buswidth widths =
> > @@ -545,6 +548,14 @@ static int sf_pdma_probe(struct platform_device *pdev)
> >
> >       pdma->n_chans = n_chans;
> >
> > +     pdma->transfer_type = PDMA_FULL_SPEED | PDMA_STRICT_ORDERING;
> > +
> > +     ddata  = device_get_match_data(&pdev->dev);
> > +     if (ddata) {
> > +             if (ddata->quirks & PDMA_QUIRK_NO_STRICT_ORDERING)
> > +                     pdma->transfer_type &= ~PDMA_STRICT_ORDERING;
> > +     }
> > +
> >       pdma->membase = devm_platform_ioremap_resource(pdev, 0);
> >       if (IS_ERR(pdma->membase))
> >               return PTR_ERR(pdma->membase);
> > @@ -632,9 +643,19 @@ static int sf_pdma_remove(struct platform_device *pdev)
> >       return 0;
> >  }
> >
> > +static const struct sf_pdma_driver_platdata mpfs_pdma = {
> > +     .quirks = PDMA_QUIRK_NO_STRICT_ORDERING,
> > +};
> > +
> >  static const struct of_device_id sf_pdma_dt_ids[] = {
> > -     { .compatible = "sifive,fu540-c000-pdma" },
> > -     { .compatible = "sifive,pdma0" },
> > +     {
> > +             .compatible = "sifive,fu540-c000-pdma",
> > +     }, {
> > +             .compatible = "sifive,pdma0",
> > +     }, {
> > +             .compatible = "microchip,mpfs-pdma",
> > +             .data       = &mpfs_pdma,
> > +     },
> >       {},
> >  };
> >  MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);
> > diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h
> > index 5c398a83b491..267e79a5e0a5 100644
> > --- a/drivers/dma/sf-pdma/sf-pdma.h
> > +++ b/drivers/dma/sf-pdma/sf-pdma.h
> > @@ -48,7 +48,8 @@
> >  #define PDMA_ERR_STATUS_MASK                         GENMASK(31, 31)
> >
> >  /* Transfer Type */
> > -#define PDMA_FULL_SPEED                                      0xFF000008
> > +#define PDMA_FULL_SPEED                                      0xFF000000
> > +#define PDMA_STRICT_ORDERING                         BIT(3)
> >
> >  /* Error Recovery */
> >  #define MAX_RETRY                                    1
> > @@ -112,8 +113,13 @@ struct sf_pdma {
> >       struct dma_device       dma_dev;
> >       void __iomem            *membase;
> >       void __iomem            *mappedbase;
> > +     u32                     transfer_type;
> >       u32                     n_chans;
> >       struct sf_pdma_chan     chans[];
> >  };
> >
> > +struct sf_pdma_driver_platdata {
> > +     u32 quirks;
> > +};
> > +
> >  #endif /* _SF_PDMA_H */
> > --
> > 2.34.1
> 
> --
> ~Vinod


