Message ID:    20181128182349.10330-2-noltari@gmail.com
State:         Superseded, archived
Delegated to:  Daniel Schwierzeck
Series:        bmips: add iudma/enet support

On 28.11.18 at 19:23, Álvaro Fernández Rojas wrote:
> BCM6348 IUDMA controller is present on multiple BMIPS (BCM63xx) SoCs.
>
> Signed-off-by: Álvaro Fernández Rojas <noltari@gmail.com>
> ---
> v9: introduce dma_prepare_rcv_buf dma op:
>  - bcm6348_iudma_chan_stop: switch to mdelay when resetting channel.
>  - bcm6348_iudma_receive: track dirty dma descriptors and no longer trigger
>    dma rx channel.
>  - bcm6348_iudma_send: reorder to properly flush cache and set dma descriptor
>    config.
>  - bcm6348_iudma_prepare_rcv_buf: implemented to clear dirty dma descriptors.
> v8: Introduce changes from Grygorii Strashko
> v5: Several fixes and improvements:
>  - Remove unused defines.
>  - Increment rx descriptors.
>  - Fix flow control issues.
>  - Error checking now depends on hw.
>  - Remove unneeded interrupts.
> v4: Fix issues reported by Grygorii Strashko and other fixes:
>  - Remove usage of net_rx_packets as buffer.
>  - Allocate dynamic rx buffer.
>  - Check dma errors and discard invalid packets.
> v3: no changes
> v2: Fix dma rx burst config and select DMA_CHANNELS.
>
>  drivers/dma/Kconfig         |   9 +
>  drivers/dma/Makefile        |   1 +
>  drivers/dma/bcm6348-iudma.c | 576 ++++++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 586 insertions(+)
>  create mode 100644 drivers/dma/bcm6348-iudma.c
>
> diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
> index 8a4162eccd..1820676d7a 100644
> --- a/drivers/dma/Kconfig
> +++ b/drivers/dma/Kconfig
> @@ -26,6 +26,15 @@ config SANDBOX_DMA
>            Enable support for a test DMA uclass implementation. It stimulates
>            DMA transfer by simple copying data between channels.
>
> +config BCM6348_IUDMA
> +        bool "BCM6348 IUDMA driver"
> +        depends on ARCH_BMIPS
> +        select DMA_CHANNELS
> +        help
> +          Enable the BCM6348 IUDMA driver.
> +          This driver support data transfer from devices to
> +          memory and from memory to devices.
> +
>  config TI_EDMA3
>          bool "TI EDMA3 driver"
>          help
> diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
> index aff31f986a..b5f9147e0a 100644
> --- a/drivers/dma/Makefile
> +++ b/drivers/dma/Makefile
> @@ -7,6 +7,7 @@ obj-$(CONFIG_DMA) += dma-uclass.o
>
>  obj-$(CONFIG_FSLDMAFEC) += MCD_tasksInit.o MCD_dmaApi.o MCD_tasks.o
>  obj-$(CONFIG_APBH_DMA) += apbh_dma.o
> +obj-$(CONFIG_BCM6348_IUDMA) += bcm6348-iudma.o
>  obj-$(CONFIG_FSL_DMA) += fsl_dma.o
>  obj-$(CONFIG_SANDBOX_DMA) += sandbox-dma-test.o
>  obj-$(CONFIG_TI_KSNAV) += keystone_nav.o keystone_nav_cfg.o
> diff --git a/drivers/dma/bcm6348-iudma.c b/drivers/dma/bcm6348-iudma.c
> new file mode 100644
> index 0000000000..284735fff3
> --- /dev/null
> +++ b/drivers/dma/bcm6348-iudma.c
> @@ -0,0 +1,576 @@
> +/*
> + * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com>
> + *
> + * Derived from linux/drivers/dma/bcm63xx-iudma.c:
> + * Copyright (C) 2015 Simon Arlott <simon@fire.lp0.eu>
> + *
> + * Derived from linux/drivers/net/ethernet/broadcom/bcm63xx_enet.c:
> + * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
> + *
> + * Derived from bcm963xx_4.12L.06B_consumer/shared/opensource/include/bcm963xx/63268_map_part.h:
> + * Copyright (C) 2000-2010 Broadcom Corporation
> + *
> + * Derived from bcm963xx_4.12L.06B_consumer/bcmdrivers/opensource/net/enet/impl4/bcmenet.c:
> + * Copyright (C) 2010 Broadcom Corporation
> + *
> + * SPDX-License-Identifier: GPL-2.0+

"// SPDX-License-Identifier: GPL-2.0+" must be the first line of the file

> + */
> +
> +#include <common.h>
> +#include <clk.h>
> +#include <dm.h>
> +#include <dma-uclass.h>
> +#include <memalign.h>
> +#include <reset.h>
> +#include <asm/io.h>
> +
> +#define DMA_RX_DESC 6
> +#define DMA_TX_DESC 1
> +
> +DECLARE_GLOBAL_DATA_PTR;
> +
> +#define ALIGN_END_ADDR(type, ptr, size) \
> +        ((unsigned long)(ptr) + roundup((size) * sizeof(type), \
> +        ARCH_DMA_MINALIGN))
> +
> +/* DMA Channels */
> +#define DMA_CHAN_FLOWC(x) ((x) >> 1)
> +#define DMA_CHAN_MAX 16
> +#define DMA_CHAN_SIZE 0x10
> +#define DMA_CHAN_TOUT 500
> +
> +/* DMA Global Configuration register */
> +#define DMA_CFG_REG 0x00
> +#define DMA_CFG_ENABLE_SHIFT 0
> +#define DMA_CFG_ENABLE_MASK (1 << DMA_CFG_ENABLE_SHIFT)
> +#define DMA_CFG_FLOWC_ENABLE(x) BIT(DMA_CHAN_FLOWC(x) + 1)
> +#define DMA_CFG_NCHANS_SHIFT 24
> +#define DMA_CFG_NCHANS_MASK (0xf << DMA_CFG_NCHANS_SHIFT)
> +
> +/* DMA Global Flow Control registers */
> +#define DMA_FLOWC_THR_LO_REG(x) (0x04 + DMA_CHAN_FLOWC(x) * 0x0c)
> +#define DMA_FLOWC_THR_HI_REG(x) (0x08 + DMA_CHAN_FLOWC(x) * 0x0c)
> +#define DMA_FLOWC_ALLOC_REG(x) (0x0c + DMA_CHAN_FLOWC(x) * 0x0c)
> +#define DMA_FLOWC_ALLOC_FORCE_SHIFT 31
> +#define DMA_FLOWC_ALLOC_FORCE_MASK (1 << DMA_FLOWC_ALLOC_FORCE_SHIFT)
> +
> +/* DMA Global Reset register */
> +#define DMA_RST_REG 0x34
> +#define DMA_RST_CHAN_SHIFT 0
> +#define DMA_RST_CHAN_MASK(x) (1 << x)
> +
> +/* DMA Channel Configuration register */
> +#define DMAC_CFG_REG(x) (DMA_CHAN_SIZE * (x) + 0x00)
> +#define DMAC_CFG_ENABLE_SHIFT 0
> +#define DMAC_CFG_ENABLE_MASK (1 << DMAC_CFG_ENABLE_SHIFT)
> +#define DMAC_CFG_PKT_HALT_SHIFT 1
> +#define DMAC_CFG_PKT_HALT_MASK (1 << DMAC_CFG_PKT_HALT_SHIFT)
> +#define DMAC_CFG_BRST_HALT_SHIFT 2
> +#define DMAC_CFG_BRST_HALT_MASK (1 << DMAC_CFG_BRST_HALT_SHIFT)
> +
> +/* DMA Channel Max Burst Length register */
> +#define DMAC_BURST_REG(x) (DMA_CHAN_SIZE * (x) + 0x0c)
> +
> +/* DMA SRAM Descriptor Ring Start register */
> +#define DMAS_RSTART_REG(x) (DMA_CHAN_SIZE * (x) + 0x00)
> +
> +/* DMA SRAM State/Bytes done/ring offset register */
> +#define DMAS_STATE_DATA_REG(x) (DMA_CHAN_SIZE * (x) + 0x04)
> +
> +/* DMA SRAM Buffer Descriptor status and length register */
> +#define DMAS_DESC_LEN_STATUS_REG(x) (DMA_CHAN_SIZE * (x) + 0x08)
> +
> +/* DMA SRAM Buffer Descriptor status and length register */
> +#define DMAS_DESC_BASE_BUFPTR_REG(x) (DMA_CHAN_SIZE * (x) + 0x0c)
> +
> +struct bcm6348_dma_desc {
> +        uint16_t length;
> +
> +        uint16_t status;
> +#define DMAD_ST_CRC_SHIFT 8
> +#define DMAD_ST_CRC_MASK (1 << DMAD_ST_CRC_SHIFT)
> +#define DMAD_ST_WRAP_SHIFT 12
> +#define DMAD_ST_WRAP_MASK (1 << DMAD_ST_WRAP_SHIFT)
> +#define DMAD_ST_SOP_SHIFT 13
> +#define DMAD_ST_SOP_MASK (1 << DMAD_ST_SOP_SHIFT)
> +#define DMAD_ST_EOP_SHIFT 14
> +#define DMAD_ST_EOP_MASK (1 << DMAD_ST_EOP_SHIFT)
> +#define DMAD_ST_OWN_SHIFT 15
> +#define DMAD_ST_OWN_MASK (1 << DMAD_ST_OWN_SHIFT)
> +
> +#define DMAD6348_ST_OV_ERR_SHIFT 0
> +#define DMAD6348_ST_OV_ERR_MASK (1 << DMAD6348_ST_OV_ERR_SHIFT)
> +#define DMAD6348_ST_CRC_ERR_SHIFT 1
> +#define DMAD6348_ST_CRC_ERR_MASK (1 << DMAD6348_ST_CRC_ERR_SHIFT)
> +#define DMAD6348_ST_RX_ERR_SHIFT 2
> +#define DMAD6348_ST_RX_ERR_MASK (1 << DMAD6348_ST_RX_ERR_SHIFT)
> +#define DMAD6348_ST_OS_ERR_SHIFT 4
> +#define DMAD6348_ST_OS_ERR_MASK (1 << DMAD6348_ST_OS_ERR_SHIFT)
> +#define DMAD6348_ST_UN_ERR_SHIFT 9
> +#define DMAD6348_ST_UN_ERR_MASK (1 << DMAD6348_ST_UN_ERR_SHIFT)

I would move this out of the struct definition

> +
> +        uint32_t address;
> +} __attribute__((aligned(1)));

__aligned(1)? But what's the sense of aligning to 1 byte?

> +
> +struct bcm6348_chan_priv {
> +        void __iomem *dma_buff;
> +        void __iomem *dma_ring;
> +        uint8_t dma_ring_size;
> +        uint8_t desc_id;
> +        uint8_t dirty_cnt;
> +        uint8_t dirty_desc;
> +};
> +
> +struct bcm6348_iudma_hw {
> +        uint16_t err_mask;
> +};
> +
> +struct bcm6348_iudma_priv {
> +        const struct bcm6348_iudma_hw *hw;
> +        void __iomem *base;
> +        void __iomem *chan;
> +        void __iomem *sram;
> +        struct bcm6348_chan_priv **ch_priv;
> +        uint8_t n_channels;
> +};
> +
> +static inline bool bcm6348_iudma_chan_is_rx(uint8_t ch)
> +{
> +        return !(ch & 1);
> +}
> +
> +static void bcm6348_iudma_chan_stop(struct bcm6348_iudma_priv *priv,
> +                                    uint8_t ch)
> +{
> +        unsigned int timeout = DMA_CHAN_TOUT;
> +
> +        do {
> +                uint32_t cfg, halt;
> +
> +                if (timeout > DMA_CHAN_TOUT / 2)
> +                        halt = DMAC_CFG_PKT_HALT_MASK;
> +                else
> +                        halt = DMAC_CFG_BRST_HALT_MASK;
> +
> +                /* try to stop dma channel */
> +                writel_be(halt, priv->chan + DMAC_CFG_REG(ch));
> +                mb();
> +
> +                /* check if channel was stopped */
> +                cfg = readl_be(priv->chan + DMAC_CFG_REG(ch));
> +                if (!(cfg & DMAC_CFG_ENABLE_MASK))
> +                        break;
> +
> +                udelay(1);
> +        } while (--timeout);
> +
> +        if (!timeout)
> +                pr_err("unable to stop channel %u\n", ch);
> +
> +        /* reset dma channel */
> +        setbits_be32(priv->base + DMA_RST_REG, DMA_RST_CHAN_MASK(ch));
> +        mdelay(1);
> +        clrbits_be32(priv->base + DMA_RST_REG, DMA_RST_CHAN_MASK(ch));
> +}
> +
> +static int bcm6348_iudma_disable(struct dma *dma)
> +{
> +        struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
> +
> +        bcm6348_iudma_chan_stop(priv, dma->id);
> +
> +        if (bcm6348_iudma_chan_is_rx(dma->id))
> +                writel_be(DMA_FLOWC_ALLOC_FORCE_MASK,
> +                          DMA_FLOWC_ALLOC_REG(dma->id));
> +
> +        return 0;
> +}
> +
> +static int bcm6348_iudma_enable(struct dma *dma)
> +{
> +        const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
> +        struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
> +        struct bcm6348_dma_desc *dma_desc;
> +        void __iomem *dma_buff = ch_priv->dma_buff;
> +        uint8_t i;
> +
> +        /* init dma rings */
> +        dma_desc = ch_priv->dma_ring;
> +        for (i = 0; i < ch_priv->dma_ring_size; i++) {
> +                uint16_t status;
> +
> +                if (bcm6348_iudma_chan_is_rx(dma->id)) {
> +                        status = DMAD_ST_OWN_MASK;
> +                        dma_desc->length = PKTSIZE_ALIGN;
> +                        dma_desc->address = virt_to_phys(dma_buff);
> +                        dma_buff += PKTSIZE_ALIGN;

shouldn't this be somehow set by a consumer driver instead of hard-coding
the size of a network packet?

> +                } else {
> +                        status = 0;
> +                        dma_desc->length = 0;
> +                        dma_desc->address = 0;
> +                }
> +
> +                if (i == ch_priv->dma_ring_size - 1)
> +                        status |= DMAD_ST_WRAP_MASK;
> +
> +                dma_desc->status = status;
> +
> +                dma_desc++;
> +        }
> +
> +        /* init to first descriptor */
> +        ch_priv->desc_id = 0;
> +        ch_priv->dirty_desc = 0;
> +        ch_priv->dirty_cnt = 0;
> +
> +        /* force cache writeback */
> +        flush_dcache_range((ulong)ch_priv->dma_ring,
> +                ALIGN_END_ADDR(struct bcm6348_dma_desc, ch_priv->dma_ring,
> +                               ch_priv->dma_ring_size));
> +
> +        /* clear sram */
> +        writel_be(0, priv->sram + DMAS_STATE_DATA_REG(dma->id));
> +        writel_be(0, priv->sram + DMAS_DESC_LEN_STATUS_REG(dma->id));
> +        writel_be(0, priv->sram + DMAS_DESC_BASE_BUFPTR_REG(dma->id));
> +
> +        /* set dma ring start */
> +        writel_be(virt_to_phys(ch_priv->dma_ring),
> +                  priv->sram + DMAS_RSTART_REG(dma->id));
> +
> +        /* set flow control */
> +        if (bcm6348_iudma_chan_is_rx(dma->id)) {
> +                u32 val;
> +
> +                setbits_be32(priv->base + DMA_CFG_REG,
> +                             DMA_CFG_FLOWC_ENABLE(dma->id));
> +
> +                val = ch_priv->dma_ring_size / 3;
> +                writel_be(val, priv->base + DMA_FLOWC_THR_LO_REG(dma->id));
> +
> +                val = (ch_priv->dma_ring_size * 2) / 3;
> +                writel_be(val, priv->base + DMA_FLOWC_THR_HI_REG(dma->id));
> +
> +                writel_be(0, priv->base + DMA_FLOWC_ALLOC_REG(dma->id));
> +        }
> +
> +        /* set dma max burst */
> +        writel_be(ch_priv->dma_ring_size,
> +                  priv->chan + DMAC_BURST_REG(dma->id));
> +
> +        /* kick dma channel */
> +        setbits_be32(priv->chan + DMAC_CFG_REG(dma->id), DMAC_CFG_ENABLE_MASK);
> +
> +        return 0;
> +}
> +
> +static int bcm6348_iudma_request(struct dma *dma)
> +{
> +        const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
> +        struct bcm6348_chan_priv *ch_priv;
> +
> +        /* check if channel is valid */
> +        if (dma->id >= priv->n_channels)
> +                return -ENODEV;
> +
> +        /* alloc channel private data */
> +        priv->ch_priv[dma->id] = calloc(1, sizeof(struct bcm6348_chan_priv));
> +        if (!priv->ch_priv[dma->id])
> +                return -ENOMEM;
> +        ch_priv = priv->ch_priv[dma->id];
> +
> +        /* alloc dma ring */
> +        if (bcm6348_iudma_chan_is_rx(dma->id))
> +                ch_priv->dma_ring_size = DMA_RX_DESC;
> +        else
> +                ch_priv->dma_ring_size = DMA_TX_DESC;
> +
> +        ch_priv->dma_ring =
> +                malloc_cache_aligned(sizeof(struct bcm6348_dma_desc) *
> +                                     ch_priv->dma_ring_size);
> +        if (!ch_priv->dma_ring)
> +                return -ENOMEM;
> +
> +        if (bcm6348_iudma_chan_is_rx(dma->id)) {
> +                ch_priv->dma_buff =
> +                        malloc_cache_aligned(PKTSIZE_ALIGN *
> +                                             ch_priv->dma_ring_size);
> +                if (!ch_priv->dma_buff)
> +                        return -ENOMEM;
> +        } else {
> +                ch_priv->dma_buff = NULL;
> +        }
> +
> +        return 0;
> +}
> +
> +static int bcm6348_iudma_receive(struct dma *dma, void **dst, void *metadata)
> +{
> +        const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
> +        const struct bcm6348_iudma_hw *hw = priv->hw;
> +        struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
> +        struct bcm6348_dma_desc *dma_desc;
> +        void __iomem *dma_buff;
> +        int ret;
> +
> +        if (ch_priv->dirty_cnt == ch_priv->dma_ring_size) {
> +                pr_err("dma rx ring full of dirty descriptors\n");
> +                return -EAGAIN;
> +        }
> +
> +        /* get dma ring descriptor address */
> +        dma_desc = ch_priv->dma_ring;
> +        dma_desc += ch_priv->desc_id;
> +
> +        /* invalidate cache data */
> +        invalidate_dcache_range((ulong)dma_desc,
> +                ALIGN_END_ADDR(struct bcm6348_dma_desc, dma_desc, 1));
> +
> +        /* check dma own */
> +        if (dma_desc->status & DMAD_ST_OWN_MASK)
> +                return -EAGAIN;
> +
> +        /* check pkt */
> +        if (!(dma_desc->status & DMAD_ST_EOP_MASK) ||
> +            !(dma_desc->status & DMAD_ST_SOP_MASK) ||
> +            (dma_desc->status & hw->err_mask)) {
> +                pr_err("invalid pkt received (ch=%ld) (st=%04x)\n",
> +                       dma->id, dma_desc->status);
> +                ret = -EAGAIN;
> +        } else {
> +                uint16_t length;
> +
> +                /* get dma buff descriptor address */
> +                dma_buff = phys_to_virt(dma_desc->address);
> +
> +                /* invalidate cache data */
> +                invalidate_dcache_range((ulong)dma_buff,
> +                                        (ulong)(dma_buff + PKTSIZE_ALIGN));
> +
> +                /* remove crc */
> +                length = dma_desc->length - 4;
> +
> +                /* copy data */
> +                memcpy(*dst, dma_buff, length);

why not returning the buffer address where the DMA transferred the data to
instead of making a copy?

> +
> +                ret = length;
> +        }
> +
> +        /* increment dma descriptor */
> +        ch_priv->desc_id = (ch_priv->desc_id + 1) % ch_priv->dma_ring_size;
> +        ch_priv->dirty_cnt++;
> +
> +        return ret;
> +}
> +
> +static int bcm6348_iudma_send(struct dma *dma, void *src, size_t len,
> +                              void *metadata)
> +{
> +        const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
> +        struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
> +        struct bcm6348_dma_desc *dma_desc;
> +        uint16_t status;
> +
> +        /* flush cache */
> +        flush_dcache_range((ulong)src, (ulong)src + PKTSIZE_ALIGN);

you pass a 'size_t len' but still use the hard-coded buffer size?

> +
> +        /* get dma ring descriptor address */
> +        dma_desc = ch_priv->dma_ring;
> +        dma_desc += ch_priv->desc_id;
> +
> +        /* config dma descriptor */
> +        status = (DMAD_ST_OWN_MASK |
> +                  DMAD_ST_EOP_MASK |
> +                  DMAD_ST_CRC_MASK |
> +                  DMAD_ST_SOP_MASK);
> +        if (ch_priv->desc_id == ch_priv->dma_ring_size - 1)
> +                status |= DMAD_ST_WRAP_MASK;
> +
> +        dma_desc->address = virt_to_phys(src);
> +        dma_desc->length = len;
> +        dma_desc->status = status;
> +
> +        /* flush cache */
> +        flush_dcache_range((ulong)dma_desc,
> +                ALIGN_END_ADDR(struct bcm6348_dma_desc, dma_desc, 1));
> +
> +        /* kick tx dma channel */
> +        setbits_be32(priv->chan + DMAC_CFG_REG(dma->id), DMAC_CFG_ENABLE_MASK);
> +
> +        /* poll dma status */
> +        do {
> +                /* invalidate cache */
> +                invalidate_dcache_range((ulong)dma_desc,
> +                        ALIGN_END_ADDR(struct bcm6348_dma_desc, dma_desc, 1));
> +
> +                if (!(dma_desc->status & DMAD_ST_OWN_MASK))
> +                        break;
> +        } while(1);
> +
> +        /* increment dma descriptor */
> +        ch_priv->desc_id = (ch_priv->desc_id + 1) % ch_priv->dma_ring_size;
> +
> +        return 0;
> +}
> +
> +int bcm6348_iudma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
> +{
> +        const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
> +        struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
> +
> +        /* init dma rings */
> +        while (ch_priv->dirty_cnt) {
> +                struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;
> +                uint16_t status = DMAD_ST_OWN_MASK;
> +
> +                /* get dirty dma descriptor */
> +                dma_desc += ch_priv->dirty_desc;
> +
> +                /* reinit dirty dma descriptor */
> +                if (ch_priv->dirty_desc == ch_priv->dma_ring_size - 1)
> +                        status |= DMAD_ST_WRAP_MASK;
> +                dma_desc->length = PKTSIZE_ALIGN;

you pass a 'size_t size' but still use the hard-coded buffer size?
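
A minimal sketch of the fix these size-related comments are asking for,
assuming callers always pass the real buffer length (the flush end is
rounded up to a cache line, since MIPS cache ops work on whole lines):

        /* in bcm6348_iudma_send(): flush only the bytes being sent */
        flush_dcache_range((ulong)src,
                           roundup((ulong)src + len, ARCH_DMA_MINALIGN));

        /* in bcm6348_iudma_prepare_rcv_buf(): honour the caller's size */
        dma_desc->length = size;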

> +                dma_desc->status = status;
> +
> +                /* flush cache */
> +                flush_dcache_range((ulong)dma_desc,
> +                        ALIGN_END_ADDR(struct bcm6348_dma_desc, dma_desc, 1));
> +
> +                /* tell dma engine we allocated one buffer */
> +                writel_be(1, DMA_FLOWC_ALLOC_REG(dma->id));
> +
> +                /* bump dirty dma descriptor */
> +                ch_priv->dirty_desc = (ch_priv->dirty_desc + 1) %
> +                                      ch_priv->dma_ring_size;
> +
> +                /* kick rx dma channel */
> +                ch_priv->dirty_cnt--;
> +                if (!ch_priv->dirty_cnt)
> +                        setbits_be32(priv->chan + DMAC_CFG_REG(dma->id),
> +                                     DMAC_CFG_ENABLE_MASK);
> +        }
> +
> +        return 0;
> +}
> +
> +static const struct dma_ops bcm6348_iudma_ops = {
> +        .disable = bcm6348_iudma_disable,
> +        .enable = bcm6348_iudma_enable,
> +        .prepare_rcv_buf = bcm6348_iudma_prepare_rcv_buf,
> +        .request = bcm6348_iudma_request,
> +        .receive = bcm6348_iudma_receive,
> +        .send = bcm6348_iudma_send,
> +};
> +
> +static const struct bcm6348_iudma_hw bcm6348_hw = {
> +        .err_mask = (DMAD6348_ST_OV_ERR_MASK |
> +                     DMAD6348_ST_CRC_ERR_MASK |
> +                     DMAD6348_ST_RX_ERR_MASK |
> +                     DMAD6348_ST_OS_ERR_MASK |
> +                     DMAD6348_ST_UN_ERR_MASK),
> +};
> +
> +static const struct bcm6348_iudma_hw bcm6368_hw = {
> +        .err_mask = 0,
> +};
> +
> +static const struct udevice_id bcm6348_iudma_ids[] = {
> +        {
> +                .compatible = "brcm,bcm6348-iudma",
> +                .data = (ulong)&bcm6348_hw,
> +        }, {
> +                .compatible = "brcm,bcm6368-iudma",
> +                .data = (ulong)&bcm6368_hw,
> +        }, { /* sentinel */ }
> +};
> +
> +static int bcm6348_iudma_probe(struct udevice *dev)
> +{
> +        struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
> +        struct bcm6348_iudma_priv *priv = dev_get_priv(dev);
> +        const struct bcm6348_iudma_hw *hw =
> +                (const struct bcm6348_iudma_hw *)dev_get_driver_data(dev);
> +        fdt_addr_t addr;
> +        uint8_t ch;
> +        int i;
> +
> +        uc_priv->supported = (DMA_SUPPORTS_DEV_TO_MEM |
> +                              DMA_SUPPORTS_MEM_TO_DEV);
> +        priv->hw = hw;
> +
> +        /* try to enable clocks */
> +        for (i = 0; ; i++) {
> +                struct clk clk;
> +                int ret;
> +
> +                ret = clk_get_by_index(dev, i, &clk);
> +                if (ret < 0)
> +                        break;

why not get the reference once and do appropriate error handling?
It's only the probe path, but this looks very inefficient

> +                if (clk_enable(&clk))
> +                        pr_err("failed to enable clock %d\n", i);
> +                clk_free(&clk);
> +        }
> +
> +        /* try to perform resets */

no error handling for clk_get_by_index()? I guess it doesn't make sense
to continue without enabled clocks?

> +        for (i = 0; ; i++) {
> +                struct reset_ctl reset;
> +                int ret;
> +
> +                ret = reset_get_by_index(dev, i, &reset);
> +                if (ret < 0)
> +                        break;
> +                if (reset_deassert(&reset))
> +                        pr_err("failed to deassert reset %d\n", i);
> +                reset_free(&reset);
> +        }

same here

> +
> +        /* dma global base address */
> +        addr = devfdt_get_addr_name(dev, "dma");
> +        if (addr == FDT_ADDR_T_NONE)
> +                return -EINVAL;
> +        priv->base = ioremap(addr, 0);

this is not compatible with the DM live tree, which you already enabled for
the BMIPS platform. You should use dev_remap_addr_index() or add a generic
dev_remap_addr_name().
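
One possible shape for such a helper, sketched on top of the existing
dm/read.h accessors (dev_read_addr_name() and map_physmem() are assumed to
be available in this tree; an illustration, not the final API):

        /* remap a named "reg" resource; works with flat and live trees */
        static void *dev_remap_addr_name(struct udevice *dev, const char *name)
        {
                fdt_addr_t addr = dev_read_addr_name(dev, name);

                if (addr == FDT_ADDR_T_NONE)
                        return NULL;

                /* uncached mapping, matching what ioremap() is used for here */
                return map_physmem(addr, 0, MAP_NOCACHE);
        }

The probe code could then do priv->base = dev_remap_addr_name(dev, "dma")
and return -EINVAL on NULL. Likewise, for the clock/reset loops above, one
way to "get the reference once" with real error handling is the bulk API,
assuming clk_get_bulk()/clk_enable_bulk() exist in this tree:

        struct clk_bulk clocks;
        int ret;

        /* grab all clocks at once; tolerate devices that have none */
        ret = clk_get_bulk(dev, &clocks);
        if (!ret)
                ret = clk_enable_bulk(&clocks);
        if (ret && ret != -ENOENT)
                return ret;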

> +
> +        /* dma channels base address */
> +        addr = devfdt_get_addr_name(dev, "dma-channels");
> +        if (addr == FDT_ADDR_T_NONE)
> +                return -EINVAL;
> +        priv->chan = ioremap(addr, 0);
> +
> +        /* dma sram base address */
> +        addr = devfdt_get_addr_name(dev, "dma-sram");
> +        if (addr == FDT_ADDR_T_NONE)
> +                return -EINVAL;
> +        priv->sram = ioremap(addr, 0);
> +
> +        /* disable dma controller */
> +        clrbits_be32(priv->base + DMA_CFG_REG, DMA_CFG_ENABLE_MASK);
> +
> +        /* get number of channels */
> +        priv->n_channels = fdtdec_get_uint(gd->fdt_blob, dev_of_offset(dev),
> +                                           "dma-channels", 8);

dev_read_u32()?

> +        if (priv->n_channels > DMA_CHAN_MAX)
> +                return -EINVAL;
> +
> +        /* alloc channel private data pointers */
> +        priv->ch_priv = calloc(priv->n_channels,
> +                               sizeof(struct bcm6348_chan_priv*));
> +        if (!priv->ch_priv)
> +                return -ENOMEM;
> +
> +        /* stop dma channels */
> +        for (ch = 0; ch < priv->n_channels; ch++)
> +                bcm6348_iudma_chan_stop(priv, ch);
> +
> +        /* enable dma controller */
> +        setbits_be32(priv->base + DMA_CFG_REG, DMA_CFG_ENABLE_MASK);
> +
> +        return 0;
> +}
> +
> +U_BOOT_DRIVER(bcm6348_iudma) = {
> +        .name = "bcm6348_iudma",
> +        .id = UCLASS_DMA,
> +        .of_match = bcm6348_iudma_ids,
> +        .ops = &bcm6348_iudma_ops,
> +        .priv_auto_alloc_size = sizeof(struct bcm6348_iudma_priv),
> +        .probe = bcm6348_iudma_probe,
> +};
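
Regarding the dev_read_u32() suggestion: a livetree-safe replacement for the
fdtdec_get_uint() call could be as small as this, assuming
dev_read_u32_default() from dm/read.h (it keeps the same default of 8):

        /* read "dma-channels", falling back to 8 if the property is absent */
        priv->n_channels = dev_read_u32_default(dev, "dma-channels", 8);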