diff --git a/arch/arc/boot/dts/skeleton_haps_idu.dtsi b/arch/arc/boot/dts/skeleton_haps_idu.dtsi
index 435aa35260b62e..761e045c14edf5 100644
--- a/arch/arc/boot/dts/skeleton_haps_idu.dtsi
+++ b/arch/arc/boot/dts/skeleton_haps_idu.dtsi
@@ -68,5 +68,12 @@
 			#interrupt-cells = <1>;
 			interrupts = <23>;
 		};
+
+		cdma: cluster_dma {
+			compatible = "snps,cluster-dma-1.0";
+			#interrupt-cells = <1>;
+			interrupts = <21>;
+			cdma-max-desc-len = <0x1FFFF>;
+		};
 	};
 };
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6bcdb4e6a0d1d3..0d612caa5b54d5 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -181,6 +181,15 @@ config DW_AXI_DMAC
 	  NOTE: This driver wasn't tested on 64 bit platform because of lack
 	  64 bit platform with Synopsys DW AXI DMAC.
 
+config DW_CDMA
+	tristate "Synopsys DesignWare ARC HS Hammerhead Cluster DMA driver"
+	depends on OF || COMPILE_TEST
+	depends on HAS_IOMEM
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the Synopsys DesignWare Cluster DMA.
+
 config EP93XX_DMA
 	bool "Cirrus Logic EP93xx DMA support"
 	depends on ARCH_EP93XX || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 616d926cf2a5fa..5a6ecd7d19061f 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
 obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o
 obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
 obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/
+obj-$(CONFIG_DW_CDMA) += dw-cdma/
 obj-$(CONFIG_DW_DMAC_CORE) += dw/
 obj-$(CONFIG_DW_EDMA) += dw-edma/
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
diff --git a/drivers/dma/dw-cdma/Makefile b/drivers/dma/dw-cdma/Makefile
new file mode 100644
index 00000000000000..2329e939ff48cc
--- /dev/null
+++ b/drivers/dma/dw-cdma/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DW_CDMA) += dw-cdma-platform.o
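For context on how the new engine is consumed (this sketch is not part of the
patch): the driver only advertises DMA_MEMCPY, so a client obtains a channel
and submits copies through the generic dmaengine API. A minimal, hypothetical
caller, with error handling trimmed:

	/* Hypothetical dmaengine client for the cluster DMA engine */
	static int cdma_copy_example(dma_addr_t dst, dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		struct dma_chan *chan;
		dma_cap_mask_t mask;
		dma_cookie_t cookie;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);	/* the only capability this driver sets */

		chan = dma_request_channel(mask, NULL, NULL);
		if (!chan)
			return -ENODEV;

		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
		if (!tx) {
			dma_release_channel(chan);
			return -ENOMEM;
		}

		cookie = dmaengine_submit(tx);
		dma_async_issue_pending(chan);	/* lands in cdma_chan_issue_pending() */

		dma_sync_wait(chan, cookie);	/* poll completion; simplest for a sketch */
		dma_release_channel(chan);
		return 0;
	}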
diff --git a/drivers/dma/dw-cdma/dw-cdma-platform.c b/drivers/dma/dw-cdma/dw-cdma-platform.c
new file mode 100644
index 00000000000000..60c87f7da673b1
--- /dev/null
+++ b/drivers/dma/dw-cdma/dw-cdma-platform.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * (C) 2023 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Synopsys DesignWare ARC HS Hammerhead Cluster DMA driver.
+ *
+ * Author: Stanislav Bolshakov
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <asm/arcregs.h>
+#include "dw-cdma.h"
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+static struct cdma_chip chip;
+
+static void cdma_enable_ch(u32 ch)
+{
+	struct cdma_s_stat_t stat;
+
+	stat.r = 0;	/* Do not reset the channel */
+	stat.e = 1;	/* Enable the channel */
+	WRITE_AUX(DMA_S_STATC_AUX(ch), stat);
+
+	do {
+		READ_BCR(DMA_S_STATC_AUX(ch), stat);
+		if (stat.st == CDMA_IDLE || stat.st == CDMA_BUSY)
+			break;
+	} while (1);
+}
+
+static void cdma_disable_ch(u32 ch)
+{
+	struct cdma_s_stat_t stat;
+
+	stat.r = 0;	/* Do not reset the channel */
+	stat.e = 0;	/* Disable the channel */
+	WRITE_AUX(DMA_S_STATC_AUX(ch), stat);
+
+	do {
+		READ_BCR(DMA_S_STATC_AUX(ch), stat);
+	} while (stat.st == CDMA_BUSY);
+}
+
+static void cdma_reset_ch(u32 ch)
+{
+	struct cdma_s_stat_t stat;
+
+	stat.r = 1;	/* Reset the channel */
+	stat.e = 0;	/* Disable the channel */
+	WRITE_AUX(DMA_S_STATC_AUX(ch), stat);
+
+	do {
+		READ_BCR(DMA_S_STATC_AUX(ch), stat);
+	} while (stat.st == CDMA_BUSY);
+}
+
+static inline int cdma_get_bit_done(u32 handle)
+{
+	u32 a, o, x;
+
+	a = handle >> DMA_HANDLE_TO_N_WORD_OFFS;	/* status word index */
+	o = 1 << (handle & DMA_HANDLE_BIT_MASK);	/* bit within the word */
+	READ_BCR(DMA_S_DONESTATD_AUX(a), x);
+	x &= o;
+	return x != 0;
+}
+
+static inline void cdma_clear_bit_done(u32 handle)
+{
+	u32 a, x;
+
+	a = handle >> DMA_HANDLE_TO_N_WORD_OFFS;
+	x = 1 << (handle & DMA_HANDLE_BIT_MASK);
+	WRITE_AUX(DMA_S_DONESTATD_CLR_AUX(a), x);
+}
+
+/* Get the descriptor handle of the last started DMA transfer */
+static inline u32 cdma_get_handle(void)
+{
+	struct cdma_c_stat_t stat;
+	u32 handle;
+
+	do {
+		READ_BCR(DMA_C_STAT_AUX, stat);
+	} while (stat.b);	/* wait while the client interface is busy */
+
+	READ_BCR(DMA_C_HANDLE_AUX, handle);
+	return handle;
+}
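+
+/*
+ * Worked example (illustrative only, not hardware output): the done state
+ * is a bitmap spread over consecutive DMA_S_DONESTATD words, indexed by
+ * descriptor handle.
+ *
+ *   handle = 37 = 0b100101
+ *   word   = handle >> DMA_HANDLE_TO_N_WORD_OFFS = 37 >> 5   = 1
+ *   mask   = 1 << (handle & DMA_HANDLE_BIT_MASK) = 1 << 5    = 0x20
+ *
+ * i.e. descriptor 37 is complete when bit 5 of DMA_S_DONESTATD_AUX(1)
+ * reads back as one, and is acknowledged by writing the same mask to
+ * DMA_S_DONESTATD_CLR_AUX(1).
+ */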
+
+/* Returns 1 if the list was already empty, 0 if entries had to be freed */
+static int free_hw_desc_list(struct list_head *list_head)
+{
+	struct cdma_hw_desc *hw_desc, *hw_desc_temp;
+
+	if (list_empty(list_head))
+		return 1;
+
+	list_for_each_entry_safe(hw_desc, hw_desc_temp, list_head, list) {
+		list_del(&hw_desc->list);
+		kfree(hw_desc);
+	}
+	return 0;
+}
+
+static void vchan_desc_put(struct virt_dma_desc *vdesc)
+{
+	struct c_dma_desc *desc = vd_to_cdma_desc(vdesc);
+	unsigned long flags;
+	int done_flg;
+
+	/*
+	 * A fully completed descriptor has empty alloc and issued lists and
+	 * a non-empty finished list; anything else is still active.
+	 */
+	spin_lock_irqsave(&desc->chan->vc.lock, flags);
+	done_flg = free_hw_desc_list(&desc->hw_desc_alloc_list);
+	done_flg &= free_hw_desc_list(&desc->hw_desc_issued_list);
+	done_flg &= !free_hw_desc_list(&desc->hw_desc_finished_list);
+	spin_unlock_irqrestore(&desc->chan->vc.lock, flags);
+
+	if (!done_flg)
+		dev_warn(chan2dev(desc->chan), "Put an active descriptor.\n");
+
+	kfree(desc);
+}
+
+static void cdma_setup(struct cdma_chip *chip)
+{
+	struct cdma_c_ctrl_t ctrl_c;
+	struct cdma_s_ctrl_t ctrl_s;
+	struct cdma_s_prioc_t prio_ch;
+	struct dw_cluster_dma *dw = chip->dw;
+	struct c_dma_chan *chan;
+	int desc_per_ch;
+	int ii;
+
+	/* Split the descriptor pool evenly across the channels */
+	desc_per_ch = dw->nr_descr / dw->nr_channels;
+
+	for (ii = 0; ii < dw->nr_channels; ii++) {
+		cdma_reset_ch(ii);
+
+		chan = &dw->channels[ii];
+		chan->chip = chip;
+		chan->id = ii;
+		chan->desc_base_n = ii * desc_per_ch;
+		chan->desc_last_n = (ii + 1) * desc_per_ch - 1;
+		WRITE_AUX(DMA_S_BASEC_AUX(ii), chan->desc_base_n);	/* Channel descriptor base */
+		WRITE_AUX(DMA_S_LASTC_AUX(ii), chan->desc_last_n);	/* Channel descriptor last */
+
+		/* Set channel priority: 0 - low, 1 - high */
+		prio_ch.p = 0;
+		WRITE_AUX(DMA_S_PRIOC_AUX(ii), prio_ch);
+
+		chan->vc.desc_free = vchan_desc_put;
+		vchan_init(&chan->vc, &dw->dma);
+	}
+
+	/* 0 - User mode access to the client AUX registers is not allowed */
+	ctrl_c.u = 0;
+	WRITE_AUX(DMA_C_CTRL_AUX, ctrl_c);
+
+	/* 0 - User mode access to the server AUX registers is not allowed */
+	ctrl_s.u = 0;
+	/* Raise an interrupt if the client pushes a descriptor into a full channel */
+	ctrl_s.o = 1;
+	/* Maximum bus burst length */
+	ctrl_s.mlen = 0xF;
+	WRITE_AUX(DMA_S_CTRL_AUX, ctrl_s);
+}
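+
+/*
+ * Illustrative partitioning only (real values come from the build config
+ * register): with nr_descr = 64 and nr_channels = 4, each channel owns a
+ * contiguous window of 16 descriptor slots:
+ *
+ *   ch 0: base =  0, last = 15
+ *   ch 1: base = 16, last = 31
+ *   ch 2: base = 32, last = 47
+ *   ch 3: base = 48, last = 63
+ *
+ * Handles returned through DMA_C_HANDLE_AUX fall inside the window of the
+ * channel that issued the push, which is what cdma_chan_handle_err()
+ * later relies on to attribute bus errors to a channel.
+ */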
+
+static inline void cdma_next(dma_addr_t src, dma_addr_t dst, u32 len, struct cdma_c_attr_t a)
+{
+#ifdef CONFIG_64BIT
+	WRITE_AUX64(DMA_C_SRC_AUX, src);
+	WRITE_AUX64(DMA_C_DST_AUX, dst);
+#else
+	u32 vv;
+
+	vv = (u32)src;
+	WRITE_AUX(DMA_C_SRC_AUX, vv);
+	vv = (u32)dst;
+	WRITE_AUX(DMA_C_DST_AUX, vv);
+#endif /* CONFIG_64BIT */
+
+	WRITE_AUX(DMA_C_ATTR_AUX, a);
+	/* Writing the length register triggers the actual dma_push message */
+	WRITE_AUX(DMA_C_LEN_AUX, len);
+}
+
+static inline void cdma_start(u32 c,			/* Channel ID */
+			      dma_addr_t src,		/* From byte address */
+			      dma_addr_t dst,		/* To byte address */
+			      u32 len,			/* DMA length in bytes */
+			      struct cdma_c_attr_t a)	/* Attributes */
+{
+	/* R/W accesses to this register stall while DMA_C_STAT_AUX.B is set */
+	WRITE_AUX(DMA_C_CHAN_AUX, c);
+	cdma_next(src, dst, len, a);
+}
+
+/* Called in channel locked context */
+static inline void cdma_chan_xfer_continue(struct c_dma_chan *chan, struct c_dma_desc *desc)
+{
+	struct cdma_c_attr_t attr;
+	struct cdma_hw_desc *hw_desc;
+
+	hw_desc = list_first_entry_or_null(&desc->hw_desc_alloc_list, struct cdma_hw_desc, list);
+	if (unlikely(hw_desc == NULL))
+		return;
+
+	if (unlikely(hw_desc->handle != -1)) {
+		dev_warn(chan2dev(desc->chan), "Was the descriptor already issued? handle=%d\n",
+			 hw_desc->handle);
+		return;
+	}
+
+	attr.d = 1;		/* Set DMA_S_DONESTATD_AUX when the server finishes the descriptor */
+	attr.i = 1;		/* Also trigger the interrupt on completion */
+	attr.e = 0;		/* Disable event mode */
+	attr.sdom = 1;		/* Source memory access - inner shareable */
+	attr.ddom = 1;		/* Destination memory access - inner shareable */
+	attr.qosd = 0x0;	/* Resource domain used with the QoS feature */
+	attr.pld = 0;		/* No L2 preloading */
+	attr.arcache = 0x2;	/* DMA read transactions - modifiable (cacheable) */
+	attr.awcache = 0x2;	/* DMA write transactions - modifiable (cacheable) */
+
+	cdma_enable_ch(chan->id);
+	cdma_start(chan->id, hw_desc->src_adr, hw_desc->dst_adr, hw_desc->length, attr);
+	hw_desc->handle = cdma_get_handle();	/* Handle of the most recently pushed descriptor */
+
+	list_move_tail(&hw_desc->list, &desc->hw_desc_issued_list);
+}
+
+/* Called in channel locked context */
+static void cdma_chan_start_first_queued(struct c_dma_chan *chan)
+{
+	struct virt_dma_desc *vd;
+	struct c_dma_desc *desc;
+
+	/* Peek at the next descriptor to be processed */
+	vd = vchan_next_desc(&chan->vc);
+	if (unlikely(!vd))
+		return;	/* Descriptor queue is empty */
+
+	desc = vd_to_cdma_desc(vd);
+	cdma_chan_xfer_continue(chan, desc);
+}
+
+/* Called from the interrupt handler */
+static inline void cdma_chan_xfer_complete(struct c_dma_chan *chan, u32 handle)
+{
+	struct virt_dma_desc *vd;
+	struct c_dma_desc *desc;
+	struct cdma_hw_desc *hw_desc;
+	unsigned long flags;
+	bool done_flg;
+	bool entry_notfound = true;
+
+	spin_lock_irqsave(&chan->vc.lock, flags);
+
+	/* The completed descriptor is currently at the head of the vc list */
+	vd = vchan_next_desc(&chan->vc);
+	if (unlikely(!vd)) {
+		spin_unlock_irqrestore(&chan->vc.lock, flags);
+		dev_warn(chan2dev(chan), "Completed descriptor list is empty!\n");
+		return;
+	}
+	desc = vd_to_cdma_desc(vd);
+
+	list_for_each_entry(hw_desc, &desc->hw_desc_issued_list, list) {
+		if (hw_desc->handle == handle) {
+			list_move_tail(&hw_desc->list, &desc->hw_desc_finished_list);
+			entry_notfound = false;
+			break;
+		}
+	}
+
+	if (unlikely(entry_notfound))
+		dev_warn(chan2dev(chan), "Didn't find an issued descriptor to complete.\n");
+
+	done_flg = list_empty(&desc->hw_desc_alloc_list) && list_empty(&desc->hw_desc_issued_list);
+	if (done_flg) {
+		list_del(&vd->node);
+		vchan_cookie_complete(vd);
+	}
+
+	/* Submit queued descriptors after processing the completed ones */
+	cdma_chan_start_first_queued(chan);
+
+	spin_unlock_irqrestore(&chan->vc.lock, flags);
+}
+
+static enum dma_status cdma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
+					   struct dma_tx_state *txstate)
+{
+	return dma_cookie_status(dchan, cookie, txstate);
+}
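+
+/*
+ * Chunk lifecycle recap (names from this driver, illustrative only):
+ *
+ *   cdma_chan_prep_memcpy()    allocates chunks   -> hw_desc_alloc_list
+ *   cdma_chan_xfer_continue()  pushes one to HW   -> hw_desc_issued_list
+ *   cdma_chan_xfer_complete()  sees the done bit  -> hw_desc_finished_list
+ *
+ * The cookie completes only once both the alloc and issued lists are
+ * empty, i.e. every chunk of the memcpy has retired.
+ */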
+
+static void process_error_chan(struct c_dma_chan *chan)
+{
+	struct virt_dma_desc *vd;
+	struct c_dma_desc *desc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->vc.lock, flags);
+
+	cdma_reset_ch(chan->id);
+
+	/* The bad descriptor is currently at the head of the vc list */
+	vd = vchan_next_desc(&chan->vc);
+	if (!vd) {
+		/* Nothing in flight on this channel */
+		spin_unlock_irqrestore(&chan->vc.lock, flags);
+		return;
+	}
+	/* Remove the bad descriptor from the issued list */
+	list_del(&vd->node);
+
+	desc = vd_to_cdma_desc(vd);
+	/* Reinitialize the source lists so a later put doesn't walk stale heads */
+	list_splice_tail_init(&desc->hw_desc_alloc_list, &desc->hw_desc_finished_list);
+	list_splice_tail_init(&desc->hw_desc_issued_list, &desc->hw_desc_finished_list);
+
+	dev_warn(chan2dev(chan),
+		 "Bad descriptor submitted for cookie: %d\n", vd->tx.cookie);
+
+	vchan_cookie_complete(vd);
+
+	/* Try to restart the controller */
+	cdma_chan_start_first_queued(chan);
+
+	spin_unlock_irqrestore(&chan->vc.lock, flags);
+}
+
+static noinline void cdma_chan_handle_err(struct dw_cluster_dma *dw, u32 err_handle)
+{
+	struct c_dma_chan *chan, *err_chan = NULL;
+	int ii;
+
+	if (err_handle != -1) {
+		/* Bus error: attribute it via the failing descriptor handle */
+		for (ii = 0; ii < dw->nr_channels; ii++) {
+			chan = &dw->channels[ii];
+			if (err_handle < chan->desc_base_n || err_handle > chan->desc_last_n)
+				continue;
+			dev_warn(chan2dev(chan), "Bus error.\n");
+			process_error_chan(chan);
+			err_chan = chan;
+			break;
+		}
+		if (err_chan == NULL) {
+			/* The handle matched no channel's window; recover them all */
+			for (ii = 0; ii < dw->nr_channels; ii++) {
+				chan = &dw->channels[ii];
+				dev_warn(chan2dev(chan), "Bus error.\n");
+				process_error_chan(chan);
+			}
+		}
+	} else {
+		/*
+		 * Handle overflow. Since the hardware doesn't report which
+		 * channel overflowed, terminate them all.
+		 */
+		for (ii = 0; ii < dw->nr_channels; ii++) {
+			chan = &dw->channels[ii];
+			dev_warn(chan2dev(chan), "Channel overflow.\n");
+			process_error_chan(chan);
+		}
+	}
+}
+
+static irqreturn_t cdma_interrupt(int irq, void *data)
+{
+	struct dw_cluster_dma *dw = chip.dw;
+	struct cdma_c_intstat_t intstat;
+	struct c_dma_chan *chan;
+	u32 err_handle;
+	int ii, jj;
+	int done;
+
+	READ_BCR(DMA_C_INTSTAT_AUX, intstat);
+	if (unlikely(intstat.b)) {
+		READ_BCR(DMA_C_ERRHANDLE_AUX, err_handle);
+		cdma_chan_handle_err(dw, err_handle);
+		return IRQ_HANDLED;
+	}
+	if (unlikely(intstat.o)) {
+		cdma_chan_handle_err(dw, -1);
+		return IRQ_HANDLED;
+	}
+	if (unlikely((intstat.b == 0) && (intstat.o == 0) && (intstat.d == 0))) {
+		/* Shouldn't happen: interrupt taken with no status flag set */
+		for (ii = 0; ii < dw->nr_channels; ii++) {
+			chan = &dw->channels[ii];
+			dev_warn(chan2dev(chan), "Spurious interrupt: no status flag set.\n");
+		}
+	}
+	WRITE_AUX(DMA_C_INTSTAT_CLR_AUX, intstat);
+
+	/* Scan each channel's descriptor window for done bits */
+	for (ii = 0; ii < dw->nr_channels; ii++) {
+		chan = &dw->channels[ii];
+		for (jj = chan->desc_base_n; jj <= chan->desc_last_n; jj++) {
+			done = cdma_get_bit_done(jj);
+			if (done) {
+				cdma_clear_bit_done(jj);
+				cdma_chan_xfer_complete(chan, jj);
+			}
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void cdma_chan_issue_pending(struct dma_chan *dchan)
+{
+	struct c_dma_chan *chan = dchan_to_cdma_chan(dchan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->vc.lock, flags);
+	if (vchan_issue_pending(&chan->vc))
+		cdma_chan_start_first_queued(chan);
+	spin_unlock_irqrestore(&chan->vc.lock, flags);
+}
+
+static int cdma_chan_terminate_all(struct dma_chan *dchan)
+{
+	struct c_dma_chan *chan = dchan_to_cdma_chan(dchan);
+	unsigned long flags;
+	int desc_done;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&chan->vc.lock, flags);
+	desc_done = list_empty(&chan->vc.desc_allocated) &&
+		    list_empty(&chan->vc.desc_submitted) &&
+		    list_empty(&chan->vc.desc_issued);
+	if (desc_done) {
+		/* Nothing allocated, submitted, or issued - nothing to terminate */
+		spin_unlock_irqrestore(&chan->vc.lock, flags);
+		return 0;
+	}
+
+	cdma_reset_ch(chan->id);
+
+	/* Collect all submitted and issued descriptors; vc.lock is held */
+	vchan_get_all_descriptors(&chan->vc, &head);
+	spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+	vchan_dma_desc_free_list(&chan->vc, &head);
+
+	return 0;
+}
+
+static void cdma_chan_free_chan_resources(struct dma_chan *dchan)
+{
+	struct c_dma_chan *chan = dchan_to_cdma_chan(dchan);
+
+	cdma_disable_ch(chan->id);
+	vchan_free_chan_resources(&chan->vc);
+}
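+
+/*
+ * For reference (hypothetical client side, not part of this driver): a
+ * consumer tears a channel down through the generic dmaengine wrappers,
+ * which land in the two callbacks above:
+ *
+ *	dmaengine_terminate_async(chan);  // -> cdma_chan_terminate_all()
+ *	dmaengine_terminate_sync(chan);   // also waits via device_synchronize()
+ *	dma_release_channel(chan);
+ */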
+
+static struct dma_async_tx_descriptor *cdma_chan_prep_memcpy(struct dma_chan *dchan,
+		dma_addr_t dst_adr, dma_addr_t src_adr, size_t len, unsigned long flags)
+{
+	struct c_dma_chan *chan = dchan_to_cdma_chan(dchan);
+	struct c_dma_desc *desc;
+	struct cdma_hw_desc *hw_desc = NULL;
+	size_t desc_limit = chan->chip->dw->max_desc_len;
+	size_t cur_len;
+
+	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+	if (unlikely(!desc))
+		return NULL;
+
+	desc->chan = chan;
+	INIT_LIST_HEAD(&desc->hw_desc_alloc_list);
+	INIT_LIST_HEAD(&desc->hw_desc_issued_list);
+	INIT_LIST_HEAD(&desc->hw_desc_finished_list);
+
+	/* Split the copy into chunks no longer than the hardware limit */
+	while (len > 0) {
+		hw_desc = kzalloc(sizeof(*hw_desc), GFP_NOWAIT);
+		if (!hw_desc) {
+			free_hw_desc_list(&desc->hw_desc_alloc_list);
+			kfree(desc);
+			return NULL;
+		}
+
+		cur_len = len;
+		if (cur_len > desc_limit)
+			cur_len = desc_limit;
+
+		hw_desc->src_adr = src_adr;
+		hw_desc->dst_adr = dst_adr;
+		hw_desc->length = cur_len;
+		hw_desc->handle = -1;	/* not issued yet */
+		list_add_tail(&hw_desc->list, &desc->hw_desc_alloc_list);
+
+		len -= cur_len;
+		src_adr += cur_len;
+		dst_adr += cur_len;
+	}
+
+	return vchan_tx_prep(&chan->vc, &desc->vd, flags);
+}
+
+static void cdma_synchronize(struct dma_chan *dchan)
+{
+	struct c_dma_chan *chan = dchan_to_cdma_chan(dchan);
+
+	vchan_synchronize(&chan->vc);
+}
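+
+/*
+ * Illustrative chunking only (the limit comes from the cdma-max-desc-len
+ * DT property; the example DT above sets 0x1FFFF = 131071 bytes):
+ *
+ *   len = 300000 bytes -> 3 hardware chunks:
+ *     chunk 0: 131071 bytes
+ *     chunk 1: 131071 bytes
+ *     chunk 2:  37858 bytes
+ *
+ * Each chunk becomes one cdma_hw_desc pushed separately to the engine.
+ */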
+
+static int read_config(struct cdma_chip *chip)
+{
+	struct device *dev = chip->dev;
+	union cdma_build_t bcr;
+	struct dw_cluster_dma *dw = chip->dw;
+	int ret;
+	u32 tmp;
+
+	bcr.val = read_aux_reg(DMA_BUILD);
+	if (bcr.ver < DW_CDMA_MIN_VERSION)
+		return -ENODEV;
+
+	dw->nr_channels = 1 << bcr.cnum;	/* e.g. cnum = 2 -> 4 channels */
+	dw->nr_descr = 1 << bcr.dnum;		/* e.g. dnum = 6 -> 64 descriptors */
+	dw->nr_max_brst = (1 << bcr.mlen) * 4;
+	dw->nr_trans = (1 << bcr.ntrans) * 4;
+
+	ret = device_property_read_u32(dev, "cdma-max-desc-len", &tmp);
+	if (ret)
+		return ret;
+
+	/* The maximum transfer length for one cdma descriptor */
+	dw->max_desc_len = tmp;
+
+	return 0;
+}
+
+static void cdma_hw_init(struct cdma_chip *chip)
+{
+	int ret;
+
+	cdma_setup(chip);
+#ifdef CONFIG_64BIT
+	ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(64));
+#else
+	ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(32));
+#endif
+	if (ret)
+		dev_warn(chip->dev, "Unable to set coherent mask\n");
+}
+
+static void cdma_cpu_irq_init(void *data)
+{
+	int irq = *(int *)data;
+
+	enable_percpu_irq(irq, IRQ_TYPE_NONE);
+}
+
+static int dw_cdma_probe(struct platform_device *pdev)
+{
+	struct dw_cluster_dma *dw;
+	int ret;
+
+	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
+	if (!dw)
+		return -ENOMEM;
+
+	chip.dw = dw;
+	chip.dev = &pdev->dev;
+
+	ret = read_config(&chip);
+	if (ret)
+		return ret;
+
+	chip.irq = platform_get_irq(pdev, 0);
+	if (chip.irq < 0)
+		return chip.irq;
+
+	ret = request_percpu_irq(chip.irq, cdma_interrupt, "CDMA", &chip.pcpu);
+	if (ret) {
+		dev_warn(&pdev->dev, "Failed to request per-cpu IRQ.\n");
+		return ret;
+	}
+	on_each_cpu(cdma_cpu_irq_init, &chip.irq, 1);
+
+	dw->channels = devm_kcalloc(chip.dev, dw->nr_channels, sizeof(*dw->channels), GFP_KERNEL);
+	if (!dw->channels) {
+		ret = -ENOMEM;
+		goto err_free_irq;
+	}
+
+	INIT_LIST_HEAD(&dw->dma.channels);
+
+	/* The device can only perform memory to memory copies */
+	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
+
+	/* DMA capabilities */
+	dw->dma.dev = chip.dev;
+	dw->dma.chancnt = dw->nr_channels;
+	dw->dma.max_burst = dw->nr_max_brst;
+	dw->dma.dev->dma_parms = &dw->dma_parms;
+	dw->dma.src_addr_widths = CDMA_BUSWIDTHS;
+	dw->dma.dst_addr_widths = CDMA_BUSWIDTHS;
+	dw->dma.directions = BIT(DMA_MEM_TO_MEM);
+	/* No support for residue reporting */
+	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+	dw->dma.device_tx_status = cdma_chan_tx_status;
+	dw->dma.device_issue_pending = cdma_chan_issue_pending;
+	dw->dma.device_terminate_all = cdma_chan_terminate_all;
+	dw->dma.device_free_chan_resources = cdma_chan_free_chan_resources;
+	dw->dma.device_prep_dma_memcpy = cdma_chan_prep_memcpy;
+	dw->dma.device_synchronize = cdma_synchronize;
+
+	cdma_hw_init(&chip);
+
+	platform_set_drvdata(pdev, dw);
+
+	ret = dma_async_device_register(&dw->dma);
+	if (ret) {
+		dev_warn(&pdev->dev, "Failed to register Cluster DMA\n");
+		goto err_free_irq;
+	}
+
+	dev_info(chip.dev, "DesignWare Cluster DMA, %d channel(s), IRQ# %d\n",
+		 dw->nr_channels, chip.irq);
+
+	return 0;
+
+err_free_irq:
+	free_percpu_irq(chip.irq, &chip.pcpu);
+	return ret;
+}
+
+static const struct of_device_id dw_cdma_of_id_table[] = {
+	{ .compatible = "snps,cluster-dma-1.0" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, dw_cdma_of_id_table);
+
+static struct platform_driver dw_cdma_driver = {
+	.probe = dw_cdma_probe,
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = dw_cdma_of_id_table,
+	},
+};
+module_platform_driver(dw_cdma_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Synopsys DesignWare ARC HS Hammerhead Cluster DMA driver");
+MODULE_AUTHOR("Stanislav Bolshakov");
diff --git a/drivers/dma/dw-cdma/dw-cdma.h b/drivers/dma/dw-cdma/dw-cdma.h
new file mode 100644
index 00000000000000..bb6d38d7d46a1a
--- /dev/null
+++ b/drivers/dma/dw-cdma/dw-cdma.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * (C) 2023 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Synopsys DesignWare ARC HS Hammerhead Cluster DMA driver.
+ *
+ * Author: Stanislav Bolshakov
+ */
+
+#ifndef _DW_CDMA_PLATFORM_H
+#define _DW_CDMA_PLATFORM_H
+
+#include <linux/dmaengine.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include "../virt-dma.h"
+
+enum ch_state {
+	CDMA_IDLE,
+	CDMA_BUSY,
+	CDMA_STOPPED,
+	CDMA_RESET
+};
+
+struct cdma_hw_desc {
+	dma_addr_t src_adr;
+	dma_addr_t dst_adr;
+	u32 length;
+	u32 handle;
+	struct list_head list;
+};
+
+struct c_dma_desc {
+	struct virt_dma_desc vd;
+	struct c_dma_chan *chan;
+	struct list_head hw_desc_alloc_list;
+	struct list_head hw_desc_issued_list;
+	struct list_head hw_desc_finished_list;
+};
+
+struct c_dma_chan {
+	struct virt_dma_chan vc;
+	struct cdma_chip *chip;
+	int desc_base_n;
+	int desc_last_n;
+	int id;
+};
+
+struct dw_cluster_dma {
+	struct dma_device dma;
+	struct device_dma_parameters dma_parms;
+	int nr_channels;
+	int nr_descr;
+	int nr_max_brst;
+	int nr_trans;
+	u32 max_desc_len;
+	struct c_dma_chan *channels;
+	int cur_ch;
+};
+
+struct cdma_chip {
+	struct device *dev;
+	struct dw_cluster_dma *dw;
+	int irq;
+	int __percpu pcpu;
+};
+
+static inline struct c_dma_desc *vd_to_cdma_desc(struct virt_dma_desc *vd)
+{
+	return container_of(vd, struct c_dma_desc, vd);
+}
+
+static inline struct device *chan2dev(struct c_dma_chan *chan)
+{
+	return &chan->vc.chan.dev->device;
+}
+
+static inline struct c_dma_chan *vc_to_cdma_chan(struct virt_dma_chan *vc)
+{
+	return container_of(vc, struct c_dma_chan, vc);
+}
+
+static inline struct c_dma_chan *dchan_to_cdma_chan(struct dma_chan *dchan)
+{
+	return vc_to_cdma_chan(to_virt_chan(dchan));
+}
+
+/* CDMA client registers */
+#define DMA_BUILD			0x0E6
+#define DMA_AUX_BASE			0xD00
+#define DMA_C_CTRL_AUX			(DMA_AUX_BASE + 0x0)	/* DMA Client Control */
+#define DMA_C_CHAN_AUX			(DMA_AUX_BASE + 0x1)	/* DMA Client Channel Select */
+#define DMA_C_SRC_AUX			(DMA_AUX_BASE + 0x2)	/* DMA Client Source Address */
+#define DMA_C_SRC_HI_AUX		(DMA_AUX_BASE + 0x3)	/* DMA Client Source High Address */
+#define DMA_C_DST_AUX			(DMA_AUX_BASE + 0x4)	/* DMA Client Destination Address */
+#define DMA_C_DST_HI_AUX		(DMA_AUX_BASE + 0x5)	/* DMA Client Destination High Address */
+#define DMA_C_ATTR_AUX			(DMA_AUX_BASE + 0x6)	/* DMA Client Attributes */
+#define DMA_C_LEN_AUX			(DMA_AUX_BASE + 0x7)	/* DMA Client Length */
+#define DMA_C_HANDLE_AUX		(DMA_AUX_BASE + 0x8)	/* DMA Client Handle */
+#define DMA_C_EVSTAT_AUX		(DMA_AUX_BASE + 0xA)	/* DMA Client Event Status */
+#define DMA_C_EVSTAT_CLR_AUX		(DMA_AUX_BASE + 0xB)	/* DMA Client Clear Event Status */
+#define DMA_C_STAT_AUX			(DMA_AUX_BASE + 0xC)	/* DMA Client Status */
+#define DMA_C_INTSTAT_AUX		(DMA_AUX_BASE + 0xD)	/* DMA Client Interrupt Status */
+#define DMA_C_INTSTAT_CLR_AUX		(DMA_AUX_BASE + 0xE)	/* DMA Client Clear Interrupt Status */
+#define DMA_C_ERRHANDLE_AUX		(DMA_AUX_BASE + 0xF)	/* DMA Client Error Handle */
+
+/* CDMA server registers */
+#define DMA_S_CTRL_AUX			(DMA_AUX_BASE + 0x10)		/* DMA Server Control */
+#define DMA_S_DONESTATD_AUX(d)		(DMA_AUX_BASE + 0x20 + (d))	/* DMA Server Done Status */
+#define DMA_S_DONESTATD_CLR_AUX(d)	(DMA_AUX_BASE + 0x40 + (d))	/* DMA Server Clear Done Status */
+#define DMA_S_BASEC_AUX(ch)		(DMA_AUX_BASE + 0x83 + (ch) * 8)	/* DMA Server Channel Base */
+#define DMA_S_LASTC_AUX(ch)		(DMA_AUX_BASE + 0x84 + (ch) * 8)	/* DMA Server Channel Last */
+#define DMA_S_PRIOC_AUX(ch)		(DMA_AUX_BASE + 0x85 + (ch) * 8)	/* DMA Server Channel Priority */
+#define DMA_S_STATC_AUX(ch)		(DMA_AUX_BASE + 0x86 + (ch) * 8)	/* DMA Server Channel Status/Control */
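+
+/*
+ * Illustrative addressing only: the per-channel server registers are
+ * strided by 8 AUX words, e.g. for channel 2:
+ *
+ *   DMA_S_STATC_AUX(2) = 0xD00 + 0x86 + 2 * 8 = 0xD96
+ */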
+
+#define DMA_HANDLE_TO_N_WORD_OFFS	5	/* Word offset of a handle in the done bitmap */
+#define DMA_HANDLE_BIT_MASK		0x1F	/* Bit position of a handle within a word */
+#define DW_CDMA_MIN_VERSION		0x10
+
+/*
+ * The set of bus widths supported by the CDMA controller. DW CDMA has a
+ * fixed 128-bit master data bus.
+ */
+#define CDMA_BUSWIDTHS \
+	(DMA_SLAVE_BUSWIDTH_1_BYTE | \
+	DMA_SLAVE_BUSWIDTH_2_BYTES | \
+	DMA_SLAVE_BUSWIDTH_4_BYTES | \
+	DMA_SLAVE_BUSWIDTH_8_BYTES | \
+	DMA_SLAVE_BUSWIDTH_16_BYTES)
+
+/* CDMA structures */
+union cdma_build_t {
+	struct {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+		u32 res:7, desci:1, dw:1, bnum:3, ntrans:2, mlen:2, dnum:4, cnum:4, ver:8;
+#else
+		u32 ver:8, cnum:4, dnum:4, mlen:2, ntrans:2, bnum:3, dw:1, desci:1, res:7;
+#endif
+	};
+	u32 val;
+};
+
+struct cdma_c_attr_t {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	u32 res2:10, awcache:4, arcache:4, pld:1, qosd:3, ddom:2, sdom:2, res1:3, e:1, i:1, d:1;
+#else
+	u32 d:1, i:1, e:1, res1:3, sdom:2, ddom:2, qosd:3, pld:1, arcache:4, awcache:4, res2:10;
+#endif
+};
+
+struct cdma_s_stat_t {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	u32 res2:21, st:2, f:1, res1:6, r:1, e:1;
+#else
+	u32 e:1, r:1, res1:6, f:1, st:2, res2:21;
+#endif
+};
+
+struct cdma_c_ctrl_t {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	u32 res:31, u:1;
+#else
+	u32 u:1, res:31;
+#endif
+};
+
+struct cdma_s_ctrl_t {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	u32 res2:20, mlen:4, res1:6, o:1, u:1;
+#else
+	u32 u:1, o:1, res1:6, mlen:4, res2:20;
+#endif
+};
+
+struct cdma_s_prioc_t {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	u32 res:31, p:1;
+#else
+	u32 p:1, res:31;
+#endif
+};
+
+struct cdma_c_stat_t {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	u32 res:31, b:1;
+#else
+	u32 b:1, res:31;
+#endif
+};
+
+struct cdma_c_intstat_t {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	u32 res:29, o:1, b:1, d:1;
+#else
+	u32 d:1, b:1, o:1, res:29;
+#endif
+};
+
+#endif /* _DW_CDMA_PLATFORM_H */