dmaengine updates for v5.3-rc1
- Add support in dmaengine core to do device node checks for DT devices
  and update a bunch of drivers to use that and remove open coding from
  drivers

- New driver/driver support for new hardware, namely:
   - MediaTek UART APDMA
   - Freescale i.mx7ulp edma2
   - Synopsys eDMA IP core version 0
   - Allwinner H6 DMA

- Updates to axi-dma and support for interleaved cyclic transfers

- Greg's debugfs return value check removals on drivers

- Updates to stm32-dma, hsu, dw, pl330, tegra drivers

-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJdLKxYAAoJEHwUBw8lI4NHsH8P/AqYZpUlLthe5L4qItzM1Uf0
HqxsJYs0xworjSRml8uptx/TzjIgJnJfEk2PV5VA+0zJNz/HnH7lDH85wKDx1Ydl
AatUuyAFRO3GZOup/hY0AEIPhoIMdg/3zS2aapjJmaEZCVK2eVKmcj0KMvO5g0cw
tsmXm3O0xd2Na1ToslNyYgFfCn8ortuAeoKiXJxhivMbGjRfw4LW/RPgS17Vspvh
mEuxNXFWAZ+DorgPF5BmDPZ+LXcGgCXGNIoj64W+VHaXU5yXnlky+6/0f7cEcFEd
yl3hjXVwyAq5zIItIOmiuozZidi5yfoizXg4S2ZD3P4xXKZ5OZ9Gf/0SMyXUIErU
pwGxo6ZgsBcEpAHtqySELQedttttID+jYYeWU6oDr2LOy3W3F7AHOEGg9l9ZllLh
gRdIoz3PrMK1wy/9Ytl37xklZyBk+HJLkeoIAvjrNgNJ1YRKqcysUCwsmqO7SG3N
HnIGx74sG8ChljT/yX5pElq3ip6qLdb4pJcsfxKJ9VSxsTZ3JNINGNQtvI19hKR/
6sn/c1Rb5/S1WxINGr+2FxChxXF8OESCN6GIEu6mNYVBzQnNPzwgPxfAGCqdoOOH
mqXXgYNePMaBGYXBkdgvP1CnqenRRmTYo/1L4QmI4Mve4xpd5zhx5cZt9FlQJ2Im
/hVT8gZ6bIrutsVOy4rg
=R+aC
-----END PGP SIGNATURE-----

Merge tag 'dmaengine-5.3-rc1' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine updates from Vinod Koul (summarized above).

* tag 'dmaengine-5.3-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (68 commits)
  dmaengine: Revert "dmaengine: fsl-edma: add i.mx7ulp edma2 version support"
  dmaengine: at_xdmac: check for non-empty xfers_list before invoking callback
  Documentation: dmaengine: clean up description of dmatest usage
  dmaengine: tegra210-adma: remove PM_CLK dependency
  dmaengine: fsl-edma: add i.mx7ulp edma2 version support
  dt-bindings: dma: fsl-edma: add new i.mx7ulp-edma
  dmaengine: fsl-edma-common: version check for v2 instead
  dmaengine: fsl-edma-common: move dmamux register to another single function
  dmaengine: fsl-edma: add drvdata for fsl-edma
  dmaengine: Revert "dmaengine: fsl-edma: support little endian for edma driver"
  dmaengine: rcar-dmac: Reject zero-length slave DMA requests
  dmaengine: dw: Enable iDMA 32-bit on Intel Elkhart Lake
  dmaengine: dw-edma: fix semicolon.cocci warnings
  dmaengine: sh: usb-dmac: Use [] to denote a flexible array member
  dmaengine: dmatest: timeout value of -1 should specify infinite wait
  dmaengine: dw: Distinguish ->remove() between DW and iDMA 32-bit
  dmaengine: fsl-edma: support little endian for edma driver
  dmaengine: hsu: Revert "set HSU_CH_MTSR to memory width"
  dmagengine: pl330: add code to get reset property
  dt-bindings: pl330: document the optional resets property
  ...
commit 47ebe00b68
@@ -1,33 +0,0 @@
* Mediatek UART APDMA Controller

Required properties:
- compatible should contain:
  * "mediatek,mt2712-uart-dma" for MT2712 compatible APDMA
  * "mediatek,mt6577-uart-dma" for MT6577 and all of the above

- reg: The base address of the APDMA register bank.

- interrupts: A single interrupt specifier.

- clocks : Must contain an entry for each entry in clock-names.
  See ../clocks/clock-bindings.txt for details.
- clock-names: The APDMA clock for register accesses

Examples:

apdma: dma-controller@11000380 {
	compatible = "mediatek,mt2712-uart-dma";
	reg = <0 0x11000380 0 0x400>;
	interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_LOW>,
		     <GIC_SPI 64 IRQ_TYPE_LEVEL_LOW>,
		     <GIC_SPI 65 IRQ_TYPE_LEVEL_LOW>,
		     <GIC_SPI 66 IRQ_TYPE_LEVEL_LOW>,
		     <GIC_SPI 67 IRQ_TYPE_LEVEL_LOW>,
		     <GIC_SPI 68 IRQ_TYPE_LEVEL_LOW>,
		     <GIC_SPI 69 IRQ_TYPE_LEVEL_LOW>,
		     <GIC_SPI 70 IRQ_TYPE_LEVEL_LOW>;
	clocks = <&pericfg CLK_PERI_AP_DMA>;
	clock-names = "apdma";
	#dma-cells = <1>;
};
@@ -16,6 +16,9 @@ Optional properties:
 - dma-channels: contains the total number of DMA channels supported by the DMAC
 - dma-requests: contains the total number of DMA requests supported by the DMAC
 - arm,pl330-broken-no-flushp: quirk for avoiding to execute DMAFLUSHP
 - resets: contains an entry for each entry in reset-names.
	See ../reset/reset.txt for details.
 - reset-names: must contain at least "dma", and optionally "dma-ocp".

Example:
@@ -9,15 +9,16 @@ group, DMAMUX0 or DMAMUX1, but not both.
Required properties:
- compatible :
	- "fsl,vf610-edma" for eDMA used similar to that on Vybrid vf610 SoC
	- "fsl,imx7ulp-edma" for eDMA2 used similar to that on i.mx7ulp
- reg : Specifies base physical address(s) and size of the eDMA registers.
	The 1st region is eDMA control register's address and size.
	The 2nd and the 3rd regions are programmable channel multiplexing
	control register's address and size.
- interrupts : A list of interrupt-specifiers, one for each entry in
	interrupt-names.
- interrupt-names : Should contain:
	"edma-tx" - the transmission interrupt
	"edma-err" - the error interrupt
	interrupt-names on vf610-like SoCs. On i.mx7ulp there is one
	transmission interrupt per channel (16 channel interrupts in total)
	plus one error interrupt (the last entry); no interrupt-names list
	is used on i.mx7ulp, to keep the dts clean.
- #dma-cells : Must be <2>.
	The 1st cell specifies the DMAMUX (0 for DMAMUX0 and 1 for DMAMUX1).
	Specific request source can only be multiplexed by specific channels
@@ -28,6 +29,7 @@ Required properties:
- clock-names : A list of channel group clock names. Should contain:
	"dmamux0" - clock name of mux0 group
	"dmamux1" - clock name of mux1 group
	Note: There is no dmamux0 on i.mx7ulp, but an additional 'dma' clk is added on i.mx7ulp.
- clocks : A list of phandle and clock-specifier pairs, one for each entry in
	clock-names.
@@ -35,6 +37,10 @@ Optional properties:
- big-endian: If present, registers and hardware scatter/gather descriptors
	of the eDMA are implemented in big endian mode; otherwise in little
	endian mode.
- interrupt-names : Should contain the entries below on vf610-like SoCs;
	not used on i.mx7ulp-like SoCs:
	"edma-tx" - the transmission interrupt
	"edma-err" - the error interrupt

Examples:
@@ -52,8 +58,36 @@ edma0: dma-controller@40018000 {
	clock-names = "dmamux0", "dmamux1";
	clocks = <&clks VF610_CLK_DMAMUX0>,
		 <&clks VF610_CLK_DMAMUX1>;
};
}; /* vf610 */

edma1: dma-controller@40080000 {
	#dma-cells = <2>;
	compatible = "fsl,imx7ulp-edma";
	reg = <0x40080000 0x2000>,
	      <0x40210000 0x1000>;
	dma-channels = <32>;
	interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
		     /* last is eDMA2-ERR interrupt */
		     <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
	clock-names = "dma", "dmamux0";
	clocks = <&pcc2 IMX7ULP_CLK_DMA1>,
		 <&pcc2 IMX7ULP_CLK_DMA_MUX1>;
}; /* i.mx7ulp */

* DMA clients
DMA client drivers that use the DMA function must use the format described
Documentation/devicetree/bindings/dma/mtk-uart-apdma.txt (new file, 54 lines)
@@ -0,0 +1,54 @@
* Mediatek UART APDMA Controller

Required properties:
- compatible should contain:
  * "mediatek,mt2712-uart-dma" for MT2712 compatible APDMA
  * "mediatek,mt6577-uart-dma" for MT6577 and all of the above

- reg: The base address of the APDMA register bank.

- interrupts: One interrupt specifier per DMA channel: one per entry in
  dma-requests, or 8 if no dma-requests property is present.

- dma-requests: The number of DMA channels

- clocks : Must contain an entry for each entry in clock-names.
  See ../clocks/clock-bindings.txt for details.
- clock-names: The APDMA clock for register accesses

- mediatek,dma-33bits: Present if the DMA controller requires 33-bit address support

Examples:

apdma: dma-controller@11000400 {
	compatible = "mediatek,mt2712-uart-dma";
	reg = <0 0x11000400 0 0x80>,
	      <0 0x11000480 0 0x80>,
	      <0 0x11000500 0 0x80>,
	      <0 0x11000580 0 0x80>,
	      <0 0x11000600 0 0x80>,
	      <0 0x11000680 0 0x80>,
	      <0 0x11000700 0 0x80>,
	      <0 0x11000780 0 0x80>,
	      <0 0x11000800 0 0x80>,
	      <0 0x11000880 0 0x80>,
	      <0 0x11000900 0 0x80>,
	      <0 0x11000980 0 0x80>;
	interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_LOW>,
		     <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>,
		     <GIC_SPI 105 IRQ_TYPE_LEVEL_LOW>,
		     <GIC_SPI 106 IRQ_TYPE_LEVEL_LOW>,
		     <GIC_SPI 107 IRQ_TYPE_LEVEL_LOW>,
		     <GIC_SPI 108 IRQ_TYPE_LEVEL_LOW>,
		     <GIC_SPI 109 IRQ_TYPE_LEVEL_LOW>,
		     <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>,
		     <GIC_SPI 111 IRQ_TYPE_LEVEL_LOW>,
		     <GIC_SPI 112 IRQ_TYPE_LEVEL_LOW>,
		     <GIC_SPI 113 IRQ_TYPE_LEVEL_LOW>,
		     <GIC_SPI 114 IRQ_TYPE_LEVEL_LOW>;
	dma-requests = <12>;
	clocks = <&pericfg CLK_PERI_AP_DMA>;
	clock-names = "apdma";
	mediatek,dma-33bits;
	#dma-cells = <1>;
};
@@ -28,12 +28,17 @@ Example:
	};

------------------------------------------------------------------------------
For A64 DMA controller:
For A64 and H6 DMA controller:

Required properties:
- compatible: "allwinner,sun50i-a64-dma"
- compatible: Must be one of
	  "allwinner,sun50i-a64-dma"
	  "allwinner,sun50i-h6-dma"
- dma-channels: Number of DMA channels supported by the controller.
	Refer to Documentation/devicetree/bindings/dma/dma.txt
- clocks: In addition to parent AHB clock, it should also contain mbus
	clock (H6 only)
- clock-names: Should contain "bus" and "mbus" (H6 only)
- all properties above, i.e. reg, interrupts, clocks, resets and #dma-cells

Optional properties:
@@ -44,7 +44,8 @@ Example of usage::

    dmatest.timeout=2000 dmatest.iterations=1 dmatest.channel=dma0chan0 dmatest.run=1

Example of multi-channel test usage:
Example of multi-channel test usage (new in the 5.0 kernel)::

    % modprobe dmatest
    % echo 2000 > /sys/module/dmatest/parameters/timeout
    % echo 1 > /sys/module/dmatest/parameters/iterations

@@ -53,15 +54,18 @@ Example of multi-channel test usage:
    % echo dma0chan2 > /sys/module/dmatest/parameters/channel
    % echo 1 > /sys/module/dmatest/parameters/run

Note: the channel parameter should always be the last parameter set prior to
running the test (setting run=1); this is because upon setting the channel
parameter, that specific channel is requested using the dmaengine and a thread
is created with the existing parameters. This thread is set as pending
and will be executed once run is set to 1. Any parameters set after the thread
is created are not applied.

.. note::
  For all tests, starting in the 5.0 kernel, either single- or multi-channel,
  the channel parameter(s) must be set after all other parameters. It is at
  that time that the existing parameter values are acquired for use by the
  thread(s). All other parameters are shared. Therefore, if changes are made
  to any of the other parameters, and an additional channel specified, the
  (shared) parameters used for all threads will use the new values.
  After the channels are specified, each thread is set as pending. All threads
  begin execution when the run parameter is set to 1.

.. hint::
  available channel list could be extracted by running the following command::
  A list of available channels can be found by running the following command::

    % ls -1 /sys/class/dma/
@@ -204,6 +208,7 @@ Releasing Channels
Channels can be freed by setting run to 0.

Example::

    % echo dma0chan1 > /sys/module/dmatest/parameters/channel
    dmatest: Added 1 threads using dma0chan1
    % cat /sys/class/dma/dma0chan1/in_use
@@ -4683,6 +4683,13 @@ L:	linux-mtd@lists.infradead.org
S:	Supported
F:	drivers/mtd/nand/raw/denali*

DESIGNWARE EDMA CORE IP DRIVER
M:	Gustavo Pimentel <gustavo.pimentel@synopsys.com>
L:	dmaengine@vger.kernel.org
S:	Maintained
F:	drivers/dma/dw-edma/
F:	include/linux/dma/edma.h

DESIGNWARE USB2 DRD IP DRIVER
M:	Minas Harutyunyan <hminas@synopsys.com>
L:	linux-usb@vger.kernel.org
@@ -103,6 +103,7 @@ config AXI_DMAC
	depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_SOCFPGA || COMPILE_TEST
	select DMA_ENGINE
	select DMA_VIRTUAL_CHANNELS
	select REGMAP_MMIO
	help
	  Enable support for the Analog Devices AXI-DMAC peripheral. This DMA
	  controller is often used in Analog Device's reference designs for FPGA

@@ -584,7 +585,7 @@ config TEGRA20_APB_DMA

config TEGRA210_ADMA
	tristate "NVIDIA Tegra210 ADMA support"
	depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) && PM_CLK
	depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST)
	select DMA_ENGINE
	select DMA_VIRTUAL_CHANNELS
	help

@@ -666,6 +667,8 @@ source "drivers/dma/qcom/Kconfig"

source "drivers/dma/dw/Kconfig"

source "drivers/dma/dw-edma/Kconfig"

source "drivers/dma/hsu/Kconfig"

source "drivers/dma/sh/Kconfig"
@@ -29,6 +29,7 @@ obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o
obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/
obj-$(CONFIG_DW_DMAC_CORE) += dw/
obj-$(CONFIG_DW_EDMA) += dw-edma/
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
@@ -2508,9 +2508,8 @@ DEFINE_SHOW_ATTRIBUTE(pl08x_debugfs);
static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
	/* Expose a simple debugfs interface to view all clocks */
	(void) debugfs_create_file(dev_name(&pl08x->adev->dev),
			S_IFREG | S_IRUGO, NULL, pl08x,
			&pl08x_debugfs_fops);
	debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
			    NULL, pl08x, &pl08x_debugfs_fops);
}

#else
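The pl08x hunk above, and the coh901318 and bcm-sba-raid hunks below, all apply the same cleanup from Greg's debugfs series: debugfs failures must never change driver behavior, so checking the return value is pointless. A minimal before/after sketch of the idiom, using a hypothetical foo driver (struct foo_dev and foo_fops are illustrative, not from this series):

	#include <linux/debugfs.h>
	#include <linux/device.h>
	#include <linux/err.h>

	struct foo_dev {			/* hypothetical driver state */
		struct device *dev;
		struct dentry *dbg;
	};

	static const struct file_operations foo_fops;	/* elided */

	/* Before: checking a return value that, by design, must not
	 * influence the driver when debugfs is unavailable. */
	static void foo_init_debugfs_old(struct foo_dev *foo)
	{
		foo->dbg = debugfs_create_file("state", 0444, NULL, foo,
					       &foo_fops);
		if (IS_ERR_OR_NULL(foo->dbg))
			dev_warn(foo->dev, "failed to create debugfs file\n");
	}

	/* After: fire and forget, and drop the now-unused dentry. */
	static void foo_init_debugfs(struct foo_dev *foo)
	{
		debugfs_create_file("state", 0444, NULL, foo, &foo_fops);
	}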
@@ -1568,11 +1568,14 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
	struct at_xdmac_desc	*desc;
	struct dma_async_tx_descriptor *txd;

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
	txd = &desc->tx_dma_desc;
	if (!list_empty(&atchan->xfers_list)) {
		desc = list_first_entry(&atchan->xfers_list,
					struct at_xdmac_desc, xfer_node);
		txd = &desc->tx_dma_desc;

	if (txd->flags & DMA_PREP_INTERRUPT)
		dmaengine_desc_get_callback_invoke(txd, NULL);
		if (txd->flags & DMA_PREP_INTERRUPT)
			dmaengine_desc_get_callback_invoke(txd, NULL);
	}
}

static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
@@ -164,7 +164,6 @@ struct sba_device {
	struct list_head reqs_free_list;
	/* DebugFS directory entries */
	struct dentry *root;
	struct dentry *stats;
};

/* ====== Command helper routines ===== */

@@ -1716,17 +1715,11 @@ static int sba_probe(struct platform_device *pdev)

	/* Create debugfs root entry */
	sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);
	if (IS_ERR_OR_NULL(sba->root)) {
		dev_err(sba->dev, "failed to create debugfs root entry\n");
		sba->root = NULL;
		goto skip_debugfs;
	}

	/* Create debugfs stats entry */
	sba->stats = debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
						 sba_debugfs_stats_show);
	if (IS_ERR_OR_NULL(sba->stats))
		dev_err(sba->dev, "failed to create debugfs stats file\n");
	debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
				    sba_debugfs_stats_show);

skip_debugfs:

	/* Register DMA device with Linux async framework */
@@ -1378,10 +1378,8 @@ static int __init init_coh901318_debugfs(void)

	dma_dentry = debugfs_create_dir("dma", NULL);

	(void) debugfs_create_file("status",
		S_IFREG | S_IRUGO,
		dma_dentry, NULL,
		&coh901318_debugfs_status_operations);
	debugfs_create_file("status", S_IFREG | S_IRUGO, dma_dentry, NULL,
			    &coh901318_debugfs_status_operations);
	return 0;
}
@@ -2,7 +2,7 @@
/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2015 Analog Devices Inc.
 * Copyright 2013-2019 Analog Devices Inc.
 * Author: Lars-Peter Clausen <lars@metafoo.de>
 */

@@ -18,7 +18,9 @@
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/fpga/adi-axi-common.h>

#include <dt-bindings/dma/axi-dmac.h>

@@ -62,6 +64,8 @@
#define AXI_DMAC_REG_STATUS		0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438
#define AXI_DMAC_REG_PARTIAL_XFER_LEN	0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID	0x450

#define AXI_DMAC_CTRL_ENABLE		BIT(0)
#define AXI_DMAC_CTRL_PAUSE		BIT(1)

@@ -70,6 +74,10 @@
#define AXI_DMAC_IRQ_EOT		BIT(1)

#define AXI_DMAC_FLAG_CYCLIC		BIT(0)
#define AXI_DMAC_FLAG_LAST		BIT(1)
#define AXI_DMAC_FLAG_PARTIAL_REPORT	BIT(2)

#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE BIT(31)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U

@@ -82,12 +90,14 @@ struct axi_dmac_sg {
	unsigned int dest_stride;
	unsigned int src_stride;
	unsigned int id;
	unsigned int partial_len;
	bool schedule_when_free;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	bool have_partial_xfer;

	unsigned int num_submitted;
	unsigned int num_completed;

@@ -108,8 +118,10 @@ struct axi_dmac_chan {
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int align_mask;
	unsigned int address_align_mask;
	unsigned int length_align_mask;

	bool hw_partial_xfer;
	bool hw_cyclic;
	bool hw_2d;
};

@@ -167,14 +179,14 @@ static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0)
		return false;
	if ((len & chan->align_mask) != 0) /* Not aligned */
	if ((len & chan->length_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->align_mask) != 0) /* Not aligned */
	if ((addr & chan->address_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

@@ -210,11 +222,13 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
	}

	desc->num_submitted++;
	if (desc->num_submitted == desc->num_sgs) {
	if (desc->num_submitted == desc->num_sgs ||
	    desc->have_partial_xfer) {
		if (desc->cyclic)
			desc->num_submitted = 0; /* Start again */
		else
			chan->next_desc = NULL;
		flags |= AXI_DMAC_FLAG_LAST;
	} else {
		chan->next_desc = desc;
	}

@@ -240,6 +254,9 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
	    desc->num_sgs == 1)
		flags |= AXI_DMAC_FLAG_CYCLIC;

	if (chan->hw_partial_xfer)
		flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
@@ -252,6 +269,83 @@ static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
		struct axi_dmac_desc, vdesc.node);
}

static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
	struct axi_dmac_sg *sg)
{
	if (chan->hw_2d)
		return sg->x_len * sg->y_len;
	else
		return sg->x_len;
}

static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	u32 xfer_done, len, id, i;
	bool found_sg;

	do {
		len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN);
		id = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID);

		found_sg = false;
		list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
			for (i = 0; i < desc->num_sgs; i++) {
				sg = &desc->sg[i];
				if (sg->id == AXI_DMAC_SG_UNUSED)
					continue;
				if (sg->id == id) {
					desc->have_partial_xfer = true;
					sg->partial_len = len;
					found_sg = true;
					break;
				}
			}
			if (found_sg)
				break;
		}

		if (found_sg) {
			dev_dbg(dmac->dma_dev.dev,
				"Found partial segment id=%u, len=%u\n",
				id, len);
		} else {
			dev_warn(dmac->dma_dev.dev,
				 "Not found partial segment id=%u, len=%u\n",
				 id, len);
		}

		/* Check if we have any more partial transfers */
		xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE);

	} while (!xfer_done);
}

static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
	struct axi_dmac_desc *active)
{
	struct dmaengine_result *rslt = &active->vdesc.tx_result;
	unsigned int start = active->num_completed - 1;
	struct axi_dmac_sg *sg;
	unsigned int i, total;

	rslt->result = DMA_TRANS_NOERROR;
	rslt->residue = 0;

	/*
	 * We get here if the last completed segment is partial, which
	 * means we can compute the residue from that segment onwards
	 */
	for (i = start; i < active->num_sgs; i++) {
		sg = &active->sg[i];
		total = axi_dmac_total_sg_bytes(chan, sg);
		rslt->residue += (total - sg->partial_len);
	}
}
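To make the residue computation concrete, a small worked example (the numbers are illustrative, not from the driver):

	/* A descriptor with three 4096-byte segments where the hardware
	 * reports a partial transfer on the second one:
	 *
	 *   num_completed = 2  ->  start = 1 (the partially completed segment)
	 *   sg[1].partial_len = 1000, sg[2].partial_len = 0 (never started)
	 *
	 *   residue = (4096 - 1000)   segment 1, partially transferred
	 *           + (4096 - 0)      segment 2, not transferred at all
	 *           = 7192 bytes
	 */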
static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{

@@ -263,6 +357,10 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	if (!active)
		return false;

	if (chan->hw_partial_xfer &&
	    (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
		axi_dmac_dequeue_partial_xfers(chan);

	do {
		sg = &active->sg[active->num_completed];
		if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */

@@ -276,10 +374,14 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
			start_next = true;
	}

	if (sg->partial_len)
		axi_dmac_compute_residue(chan, active);

	if (active->cyclic)
		vchan_cyclic_callback(&active->vdesc);

	if (active->num_completed == active->num_sgs) {
	if (active->num_completed == active->num_sgs ||
	    sg->partial_len) {
		if (active->cyclic) {
			active->num_completed = 0; /* wrap around */
		} else {
|
|||
num_segments = DIV_ROUND_UP(period_len, chan->max_length);
|
||||
segment_size = DIV_ROUND_UP(period_len, num_segments);
|
||||
/* Take care of alignment */
|
||||
segment_size = ((segment_size - 1) | chan->align_mask) + 1;
|
||||
segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;
|
||||
|
||||
for (i = 0; i < num_periods; i++) {
|
||||
len = period_len;
|
||||
|
@ -561,6 +663,9 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
|
|||
desc->sg[0].y_len = 1;
|
||||
}
|
||||
|
||||
if (flags & DMA_CYCLIC)
|
||||
desc->cyclic = true;
|
||||
|
||||
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
|
||||
}
|
||||
|
||||
|
@ -574,6 +679,44 @@ static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
|
|||
kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
|
||||
}
|
||||
|
||||
static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
|
||||
{
|
||||
switch (reg) {
|
||||
case AXI_DMAC_REG_IRQ_MASK:
|
||||
case AXI_DMAC_REG_IRQ_SOURCE:
|
||||
case AXI_DMAC_REG_IRQ_PENDING:
|
||||
case AXI_DMAC_REG_CTRL:
|
||||
case AXI_DMAC_REG_TRANSFER_ID:
|
||||
case AXI_DMAC_REG_START_TRANSFER:
|
||||
case AXI_DMAC_REG_FLAGS:
|
||||
case AXI_DMAC_REG_DEST_ADDRESS:
|
||||
case AXI_DMAC_REG_SRC_ADDRESS:
|
||||
case AXI_DMAC_REG_X_LENGTH:
|
||||
case AXI_DMAC_REG_Y_LENGTH:
|
||||
case AXI_DMAC_REG_DEST_STRIDE:
|
||||
case AXI_DMAC_REG_SRC_STRIDE:
|
||||
case AXI_DMAC_REG_TRANSFER_DONE:
|
||||
case AXI_DMAC_REG_ACTIVE_TRANSFER_ID:
|
||||
case AXI_DMAC_REG_STATUS:
|
||||
case AXI_DMAC_REG_CURRENT_SRC_ADDR:
|
||||
case AXI_DMAC_REG_CURRENT_DEST_ADDR:
|
||||
case AXI_DMAC_REG_PARTIAL_XFER_LEN:
|
||||
case AXI_DMAC_REG_PARTIAL_XFER_ID:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static const struct regmap_config axi_dmac_regmap_config = {
|
||||
.reg_bits = 32,
|
||||
.val_bits = 32,
|
||||
.reg_stride = 4,
|
||||
.max_register = AXI_DMAC_REG_PARTIAL_XFER_ID,
|
||||
.readable_reg = axi_dmac_regmap_rdwr,
|
||||
.writeable_reg = axi_dmac_regmap_rdwr,
|
||||
};
|
||||
|
||||
/*
|
||||
* The configuration stored in the devicetree matches the configuration
|
||||
* parameters of the peripheral instance and allows the driver to know which
|
||||
|
@ -617,7 +760,7 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
|
|||
return ret;
|
||||
chan->dest_width = val / 8;
|
||||
|
||||
chan->align_mask = max(chan->dest_width, chan->src_width) - 1;
|
||||
chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;
|
||||
|
||||
if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
|
||||
chan->direction = DMA_MEM_TO_MEM;
|
||||
|
@ -631,9 +774,12 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void axi_dmac_detect_caps(struct axi_dmac *dmac)
|
||||
static int axi_dmac_detect_caps(struct axi_dmac *dmac)
|
||||
{
|
||||
struct axi_dmac_chan *chan = &dmac->chan;
|
||||
unsigned int version;
|
||||
|
||||
version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);
|
||||
|
||||
axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
|
||||
if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
|
||||
|
@ -647,6 +793,35 @@ static void axi_dmac_detect_caps(struct axi_dmac *dmac)
|
|||
chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
|
||||
if (chan->max_length != UINT_MAX)
|
||||
chan->max_length++;
|
||||
|
||||
axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
|
||||
if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
|
||||
chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
|
||||
dev_err(dmac->dma_dev.dev,
|
||||
"Destination memory-mapped interface not supported.");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
|
||||
if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
|
||||
chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
|
||||
dev_err(dmac->dma_dev.dev,
|
||||
"Source memory-mapped interface not supported.");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (version >= ADI_AXI_PCORE_VER(4, 2, 'a'))
|
||||
chan->hw_partial_xfer = true;
|
||||
|
||||
if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
|
||||
axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
|
||||
chan->length_align_mask =
|
||||
axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
|
||||
} else {
|
||||
chan->length_align_mask = chan->address_align_mask;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int axi_dmac_probe(struct platform_device *pdev)
|
||||
|
@ -722,7 +897,11 @@ static int axi_dmac_probe(struct platform_device *pdev)
|
|||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
axi_dmac_detect_caps(dmac);
|
||||
ret = axi_dmac_detect_caps(dmac);
|
||||
if (ret)
|
||||
goto err_clk_disable;
|
||||
|
||||
dma_dev->copy_align = (dmac->chan.address_align_mask + 1);
|
||||
|
||||
axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
|
||||
|
||||
|
@ -742,6 +921,8 @@ static int axi_dmac_probe(struct platform_device *pdev)
|
|||
|
||||
platform_set_drvdata(pdev, dmac);
|
||||
|
||||
devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config);
|
||||
|
||||
return 0;
|
||||
|
||||
err_unregister_of:
|
||||
|
|
|
@@ -156,7 +156,6 @@ struct jz4780_dma_dev {
};

struct jz4780_dma_filter_data {
	struct device_node *of_node;
	uint32_t transfer_type;
	int channel;
};

@@ -772,8 +771,6 @@ static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct jz4780_dma_filter_data *data = param;

	if (jzdma->dma_device.dev->of_node != data->of_node)
		return false;

	if (data->channel > -1) {
		if (data->channel != jzchan->id)

@@ -797,7 +794,6 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
	if (dma_spec->args_count != 2)
		return NULL;

	data.of_node = ofdma->of_node;
	data.transfer_type = dma_spec->args[0];
	data.channel = dma_spec->args[1];

@@ -822,7 +818,8 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
		return dma_get_slave_channel(
			&jzdma->chan[data.channel].vchan.chan);
	} else {
		return dma_request_channel(mask, jz4780_dma_filter_fn, &data);
		return __dma_request_channel(&mask, jz4780_dma_filter_fn, &data,
					     ofdma->of_node);
	}
}
@@ -61,7 +61,7 @@ static long dmaengine_ref_count;
/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to the its sysfs container object
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex

@@ -629,11 +629,13 @@ EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 * @np: device node to look for DMA channels
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
				       dma_filter_fn fn, void *fn_param,
				       struct device_node *np)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;

@@ -641,6 +643,10 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		/* Finds a DMA controller with matching device node */
		if (np && device->dev->of_node && np != device->dev->of_node)
			continue;

		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;
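Call sites follow the jz4780 change above: take the controller's device tree node from the of_dma translation and hand it to __dma_request_channel(), which now skips controllers whose of_node differs. A minimal sketch for a hypothetical driver's xlate callback (foo_of_dma_xlate is illustrative; the __dma_request_channel() signature is the one introduced by this series):

	#include <linux/dmaengine.h>
	#include <linux/of_dma.h>

	/* Request any DMA_SLAVE channel, but only from the controller
	 * described by this of_dma node. The NULL filter and NULL
	 * parameter mean "no per-channel filtering"; the final argument
	 * restricts the search to the matching controller.
	 */
	static struct dma_chan *foo_of_dma_xlate(struct of_phandle_args *dma_spec,
						 struct of_dma *ofdma)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		return __dma_request_channel(&mask, NULL, NULL, ofdma->of_node);
	}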
@@ -699,7 +705,7 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	if (chan) {
		/* Valid channel found or requester need to be deferred */
		/* Valid channel found or requester needs to be deferred */
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			return chan;
	}

@@ -757,7 +763,7 @@ struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
	if (!mask)
		return ERR_PTR(-ENODEV);

	chan = __dma_request_channel(mask, NULL, NULL);
	chan = __dma_request_channel(mask, NULL, NULL, NULL);
	if (!chan) {
		mutex_lock(&dma_list_mutex);
		if (list_empty(&dma_device_list))
@@ -62,7 +62,7 @@ MODULE_PARM_DESC(pq_sources,
static int timeout = 3000;
module_param(timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		 "Pass -1 for infinite timeout");
		 "Pass 0xFFFFFFFF (4294967295) for maximum timeout");

static bool noverify;
module_param(noverify, bool, S_IRUGO | S_IWUSR);

@@ -94,7 +94,7 @@ MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default
 * @iterations:		iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:		number of p+q source buffers
 * @timeout:		transfer timeout in msec, -1 for infinite timeout
 * @timeout:		transfer timeout in msec, 0 - 0xFFFFFFFF (4294967295)
 */
struct dmatest_params {
	unsigned int buf_size;

@@ -105,7 +105,7 @@ struct dmatest_params {
	unsigned int iterations;
	unsigned int xor_sources;
	unsigned int pq_sources;
	int timeout;
	unsigned int timeout;
	bool noverify;
	bool norandom;
	int alignment;
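For scale: the maximum value a 32-bit unsigned timeout can hold corresponds to roughly 49.7 days, which is why the updated documentation calls 0xFFFFFFFF a "maximum" rather than a literally infinite timeout. A quick sanity check of the arithmetic (plain C, not driver code):

	unsigned int timeout_ms = 0xFFFFFFFFu;	/* 4294967295 ms */
	double days = timeout_ms / 1000.0 / 60.0 / 60.0 / 24.0;

	/* 4294967295 ms / 1000 = ~4294967 s
	 * / 3600 = ~1193 hours
	 * / 24   = ~49.7 days
	 */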
drivers/dma/dw-edma/Kconfig (new file, 19 lines)
@@ -0,0 +1,19 @@
# SPDX-License-Identifier: GPL-2.0

config DW_EDMA
	tristate "Synopsys DesignWare eDMA controller driver"
	depends on PCI && PCI_MSI
	select DMA_ENGINE
	select DMA_VIRTUAL_CHANNELS
	help
	  Support the Synopsys DesignWare eDMA controller, normally
	  implemented in endpoint SoCs.

config DW_EDMA_PCIE
	tristate "Synopsys DesignWare eDMA PCIe driver"
	depends on PCI && PCI_MSI
	select DW_EDMA
	help
	  Provides glue logic between the Synopsys DesignWare
	  eDMA controller and an endpoint PCIe device. This also serves
	  as a reference design for anyone who wants to use this IP.
drivers/dma/dw-edma/Makefile (new file, 7 lines)
@@ -0,0 +1,7 @@
# SPDX-License-Identifier: GPL-2.0

obj-$(CONFIG_DW_EDMA)		+= dw-edma.o
dw-edma-$(CONFIG_DEBUG_FS)	:= dw-edma-v0-debugfs.o
dw-edma-objs			:= dw-edma-core.o \
				   dw-edma-v0-core.o $(dw-edma-y)
obj-$(CONFIG_DW_EDMA_PCIE)	+= dw-edma-pcie.o
drivers/dma/dw-edma/dw-edma-core.c (new file, 937 lines)
@@ -0,0 +1,937 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA core driver
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/dma/edma.h>
#include <linux/pci.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

static inline
struct device *dchan2dev(struct dma_chan *dchan)
{
	return &dchan->dev->device;
}

static inline
struct device *chan2dev(struct dw_edma_chan *chan)
{
	return &chan->vc.chan.dev->device;
}

static inline
struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dw_edma_desc, vd);
}

static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *burst;

	burst = kzalloc(sizeof(*burst), GFP_NOWAIT);
	if (unlikely(!burst))
		return NULL;

	INIT_LIST_HEAD(&burst->list);
	if (chunk->burst) {
		/* Create and add new element into the linked list */
		chunk->bursts_alloc++;
		list_add_tail(&burst->list, &chunk->burst->list);
	} else {
		/* List head */
		chunk->bursts_alloc = 0;
		chunk->burst = burst;
	}

	return burst;
}

static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chan *chan = desc->chan;
	struct dw_edma *dw = chan->chip->dw;
	struct dw_edma_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
	if (unlikely(!chunk))
		return NULL;

	INIT_LIST_HEAD(&chunk->list);
	chunk->chan = chan;
	/* Toggling change bit (CB) in each chunk, this is a mechanism to
	 * inform the eDMA HW block that this is a new linked list ready
	 * to be consumed.
	 *  - Odd chunks originate CB equal to 0
	 *  - Even chunks originate CB equal to 1
	 */
	chunk->cb = !(desc->chunks_alloc % 2);
	chunk->ll_region.paddr = dw->ll_region.paddr + chan->ll_off;
	chunk->ll_region.vaddr = dw->ll_region.vaddr + chan->ll_off;

	if (desc->chunk) {
		/* Create and add new element into the linked list */
		desc->chunks_alloc++;
		list_add_tail(&chunk->list, &desc->chunk->list);
		if (!dw_edma_alloc_burst(chunk)) {
			kfree(chunk);
			return NULL;
		}
	} else {
		/* List head */
		chunk->burst = NULL;
		desc->chunks_alloc = 0;
		desc->chunk = chunk;
	}

	return chunk;
}

static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (unlikely(!desc))
		return NULL;

	desc->chan = chan;
	if (!dw_edma_alloc_chunk(desc)) {
		kfree(desc);
		return NULL;
	}

	return desc;
}
static void dw_edma_free_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child, *_next;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &chunk->burst->list, list) {
		list_del(&child->list);
		kfree(child);
		chunk->bursts_alloc--;
	}

	/* Remove the list head */
	kfree(child);
	chunk->burst = NULL;
}

static void dw_edma_free_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chunk *child, *_next;

	if (!desc->chunk)
		return;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &desc->chunk->list, list) {
		dw_edma_free_burst(child);
		list_del(&child->list);
		kfree(child);
		desc->chunks_alloc--;
	}

	/* Remove the list head */
	kfree(child);
	desc->chunk = NULL;
}

static void dw_edma_free_desc(struct dw_edma_desc *desc)
{
	dw_edma_free_chunk(desc);
	kfree(desc);
}

static void vchan_free_desc(struct virt_dma_desc *vdesc)
{
	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
}

static void dw_edma_start_transfer(struct dw_edma_chan *chan)
{
	struct dw_edma_chunk *child;
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd2dw_edma_desc(vd);
	if (!desc)
		return;

	child = list_first_entry_or_null(&desc->chunk->list,
					 struct dw_edma_chunk, list);
	if (!child)
		return;

	dw_edma_v0_core_start(child, !desc->xfer_sz);
	desc->xfer_sz += child->ll_region.sz;
	dw_edma_free_burst(child);
	list_del(&child->list);
	kfree(child);
	desc->chunks_alloc--;
}
static int dw_edma_device_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));
	chan->configured = true;

	return 0;
}

static int dw_edma_device_pause(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured)
		err = -EPERM;
	else if (chan->status != EDMA_ST_BUSY)
		err = -EPERM;
	else if (chan->request != EDMA_REQ_NONE)
		err = -EPERM;
	else
		chan->request = EDMA_REQ_PAUSE;

	return err;
}

static int dw_edma_device_resume(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		err = -EPERM;
	} else if (chan->status != EDMA_ST_PAUSE) {
		err = -EPERM;
	} else if (chan->request != EDMA_REQ_NONE) {
		err = -EPERM;
	} else {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}

	return err;
}

static int dw_edma_device_terminate_all(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;
	LIST_HEAD(head);

	if (!chan->configured) {
		/* Do nothing */
	} else if (chan->status == EDMA_ST_PAUSE) {
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->status == EDMA_ST_IDLE) {
		chan->configured = false;
	} else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) {
		/*
		 * The channel is in a false BUSY state, probably didn't
		 * receive or lost an interrupt
		 */
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->request > EDMA_REQ_PAUSE) {
		err = -EPERM;
	} else {
		chan->request = EDMA_REQ_STOP;
	}

	return err;
}
static void dw_edma_device_issue_pending(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (chan->configured && chan->request == EDMA_REQ_NONE &&
	    chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static enum dma_status
dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
			 struct dma_tx_state *txstate)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE)
		ret = DMA_PAUSED;

	if (!txstate)
		goto ret_residue;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_find_desc(&chan->vc, cookie);
	if (vd) {
		desc = vd2dw_edma_desc(vd);
		if (desc)
			residue = desc->alloc_sz - desc->xfer_sz;
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);

ret_residue:
	dma_set_residue(txstate, residue);

	return ret;
}
static struct dma_async_tx_descriptor *
dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
	enum dma_transfer_direction direction = xfer->direction;
	phys_addr_t src_addr, dst_addr;
	struct scatterlist *sg = NULL;
	struct dw_edma_chunk *chunk;
	struct dw_edma_burst *burst;
	struct dw_edma_desc *desc;
	u32 cnt;
	int i;

	if ((direction == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE) ||
	    (direction == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ))
		return NULL;

	if (xfer->cyclic) {
		if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
			return NULL;
	} else {
		if (xfer->xfer.sg.len < 1)
			return NULL;
	}

	if (!chan->configured)
		return NULL;

	desc = dw_edma_alloc_desc(chan);
	if (unlikely(!desc))
		goto err_alloc;

	chunk = dw_edma_alloc_chunk(desc);
	if (unlikely(!chunk))
		goto err_alloc;

	src_addr = chan->config.src_addr;
	dst_addr = chan->config.dst_addr;

	if (xfer->cyclic) {
		cnt = xfer->xfer.cyclic.cnt;
	} else {
		cnt = xfer->xfer.sg.len;
		sg = xfer->xfer.sg.sgl;
	}

	for (i = 0; i < cnt; i++) {
		if (!xfer->cyclic && !sg)
			break;

		if (chunk->bursts_alloc == chan->ll_max) {
			chunk = dw_edma_alloc_chunk(desc);
			if (unlikely(!chunk))
				goto err_alloc;
		}

		burst = dw_edma_alloc_burst(chunk);
		if (unlikely(!burst))
			goto err_alloc;

		if (xfer->cyclic)
			burst->sz = xfer->xfer.cyclic.len;
		else
			burst->sz = sg_dma_len(sg);

		chunk->ll_region.sz += burst->sz;
		desc->alloc_sz += burst->sz;

		if (direction == DMA_DEV_TO_MEM) {
			burst->sar = src_addr;
			if (xfer->cyclic) {
				burst->dar = xfer->xfer.cyclic.paddr;
			} else {
				burst->dar = sg_dma_address(sg);
				/* Unlike the typical assumption by other
				 * drivers/IPs the peripheral memory isn't
				 * a FIFO memory; in this case, it's a
				 * linear memory and that's why the source
				 * and destination addresses are increased
				 * by the same portion (data length)
				 */
				src_addr += sg_dma_len(sg);
			}
		} else {
			burst->dar = dst_addr;
			if (xfer->cyclic) {
				burst->sar = xfer->xfer.cyclic.paddr;
			} else {
				burst->sar = sg_dma_address(sg);
				/* Unlike the typical assumption by other
				 * drivers/IPs the peripheral memory isn't
				 * a FIFO memory; in this case, it's a
				 * linear memory and that's why the source
				 * and destination addresses are increased
				 * by the same portion (data length)
				 */
				dst_addr += sg_dma_len(sg);
			}
		}

		if (!xfer->cyclic)
			sg = sg_next(sg);
	}

	return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);

err_alloc:
	if (desc)
		dw_edma_free_desc(desc);

	return NULL;
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			     unsigned int len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.sg.sgl = sgl;
	xfer.xfer.sg.len = len;
	xfer.flags = flags;
	xfer.cyclic = false;

	return dw_edma_device_transfer(&xfer);
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
			       size_t len, size_t count,
			       enum dma_transfer_direction direction,
			       unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.cyclic.paddr = paddr;
	xfer.xfer.cyclic.len = len;
	xfer.xfer.cyclic.cnt = count;
	xfer.flags = flags;
	xfer.cyclic = true;

	return dw_edma_device_transfer(&xfer);
}
static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_done_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		switch (chan->request) {
		case EDMA_REQ_NONE:
			desc = vd2dw_edma_desc(vd);
			if (desc->chunks_alloc) {
				chan->status = EDMA_ST_BUSY;
				dw_edma_start_transfer(chan);
			} else {
				list_del(&vd->node);
				vchan_cookie_complete(vd);
				chan->status = EDMA_ST_IDLE;
			}
			break;

		case EDMA_REQ_STOP:
			list_del(&vd->node);
			vchan_cookie_complete(vd);
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_IDLE;
			break;

		case EDMA_REQ_PAUSE:
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_PAUSE;
			break;

		default:
			break;
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_abort_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		list_del(&vd->node);
		vchan_cookie_complete(vd);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	chan->request = EDMA_REQ_NONE;
	chan->status = EDMA_ST_IDLE;
}

static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write)
{
	struct dw_edma_irq *dw_irq = data;
	struct dw_edma *dw = dw_irq->dw;
	unsigned long total, pos, val;
	unsigned long off;
	u32 mask;

	if (write) {
		total = dw->wr_ch_cnt;
		off = 0;
		mask = dw_irq->wr_mask;
	} else {
		total = dw->rd_ch_cnt;
		off = dw->wr_ch_cnt;
		mask = dw_irq->rd_mask;
	}

	val = dw_edma_v0_core_status_done_int(dw, write ?
					      EDMA_DIR_WRITE :
					      EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_done_interrupt(chan);
	}

	val = dw_edma_v0_core_status_abort_int(dw, write ?
					       EDMA_DIR_WRITE :
					       EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_abort_interrupt(chan);
	}

	return IRQ_HANDLED;
}

static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, true);
}

static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, false);
}

static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
{
	dw_edma_interrupt(irq, data, true);
	dw_edma_interrupt(irq, data, false);

	return IRQ_HANDLED;
}
static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	if (chan->status != EDMA_ST_IDLE)
		return -EBUSY;

	pm_runtime_get(chan->chip->dev);

	return 0;
}

static void dw_edma_free_chan_resources(struct dma_chan *dchan)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int ret;

	while (time_before(jiffies, timeout)) {
		ret = dw_edma_device_terminate_all(dchan);
		if (!ret)
			break;

		if (time_after_eq(jiffies, timeout))
			return;

		cpu_relax();
	}

	pm_runtime_put(chan->chip->dev);
}

static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
				 u32 wr_alloc, u32 rd_alloc)
{
	struct dw_edma_region *dt_region;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	struct dw_edma_chan *chan;
	size_t ll_chunk, dt_chunk;
	struct dw_edma_irq *irq;
	struct dma_device *dma;
	u32 i, j, cnt, ch_cnt;
	u32 alloc, off_alloc;
	int err = 0;
	u32 pos;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
	ll_chunk = dw->ll_region.sz;
	dt_chunk = dw->dt_region.sz;

	/* Calculate linked list chunk for each channel */
	ll_chunk /= roundup_pow_of_two(ch_cnt);

	/* Calculate data region chunk for each channel */
	dt_chunk /= roundup_pow_of_two(ch_cnt);

	if (write) {
		i = 0;
		cnt = dw->wr_ch_cnt;
		dma = &dw->wr_edma;
		alloc = wr_alloc;
		off_alloc = 0;
	} else {
		i = dw->wr_ch_cnt;
		cnt = dw->rd_ch_cnt;
		dma = &dw->rd_edma;
		alloc = rd_alloc;
		off_alloc = wr_alloc;
	}

	INIT_LIST_HEAD(&dma->channels);
	for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) {
		chan = &dw->chan[i];

		dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL);
		if (!dt_region)
			return -ENOMEM;

		chan->vc.chan.private = dt_region;

		chan->chip = chip;
		chan->id = j;
		chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
		chan->configured = false;
		chan->request = EDMA_REQ_NONE;
		chan->status = EDMA_ST_IDLE;

		chan->ll_off = (ll_chunk * i);
		chan->ll_max = (ll_chunk / EDMA_LL_SZ) - 1;

		chan->dt_off = (dt_chunk * i);

		dev_vdbg(dev, "L. List:\tChannel %s[%u] off=0x%.8lx, max_cnt=%u\n",
			 write ? "write" : "read", j,
			 chan->ll_off, chan->ll_max);

		if (dw->nr_irqs == 1)
			pos = 0;
		else
			pos = off_alloc + (j % alloc);

		irq = &dw->irq[pos];

		if (write)
			irq->wr_mask |= BIT(j);
		else
			irq->rd_mask |= BIT(j);

		irq->dw = dw;
		memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));

		dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
			 write ? "write" : "read", j,
			 chan->msi.address_hi, chan->msi.address_lo,
			 chan->msi.data);

		chan->vc.desc_free = vchan_free_desc;
		vchan_init(&chan->vc, dma);

		dt_region->paddr = dw->dt_region.paddr + chan->dt_off;
		dt_region->vaddr = dw->dt_region.vaddr + chan->dt_off;
		dt_region->sz = dt_chunk;

		dev_vdbg(dev, "Data:\tChannel %s[%u] off=0x%.8lx\n",
			 write ? "write" : "read", j, chan->dt_off);

		dw_edma_v0_core_device_config(chan);
	}

	/* Set DMA channel capabilities */
	dma_cap_zero(dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma->chancnt = cnt;

	/* Set DMA channel callbacks */
	dma->dev = chip->dev;
	dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
	dma->device_free_chan_resources = dw_edma_free_chan_resources;
	dma->device_config = dw_edma_device_config;
	dma->device_pause = dw_edma_device_pause;
	dma->device_resume = dw_edma_device_resume;
	dma->device_terminate_all = dw_edma_device_terminate_all;
	dma->device_issue_pending = dw_edma_device_issue_pending;
	dma->device_tx_status = dw_edma_device_tx_status;
	dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
	dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;

	dma_set_max_seg_size(dma->dev, U32_MAX);

	/* Register DMA device */
	err = dma_async_device_register(dma);

	return err;
}

static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
{
	if (*nr_irqs && *alloc < cnt) {
		(*alloc)++;
		(*nr_irqs)--;
	}
}

static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
{
	while (*mask * alloc < cnt)
		(*mask)++;
}
static int dw_edma_irq_request(struct dw_edma_chip *chip,
|
||||
u32 *wr_alloc, u32 *rd_alloc)
|
||||
{
|
||||
struct device *dev = chip->dev;
|
||||
struct dw_edma *dw = chip->dw;
|
||||
u32 wr_mask = 1;
|
||||
u32 rd_mask = 1;
|
||||
int i, err = 0;
|
||||
u32 ch_cnt;
|
||||
|
||||
ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
|
||||
|
||||
if (dw->nr_irqs < 1)
|
||||
return -EINVAL;
|
||||
|
||||
if (dw->nr_irqs == 1) {
|
||||
/* Common IRQ shared among all channels */
|
||||
err = request_irq(pci_irq_vector(to_pci_dev(dev), 0),
|
||||
dw_edma_interrupt_common,
|
||||
IRQF_SHARED, dw->name, &dw->irq[0]);
|
||||
if (err) {
|
||||
dw->nr_irqs = 0;
|
||||
return err;
|
||||
}
|
||||
|
||||
get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), 0),
|
||||
&dw->irq[0].msi);
|
||||
} else {
|
||||
/* Distribute IRQs equally among all channels */
|
||||
int tmp = dw->nr_irqs;
|
||||
|
||||
while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
|
||||
dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
|
||||
dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt);
|
||||
}
|
||||
|
||||
dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt);
|
||||
dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);
|
||||
|
||||
for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
|
||||
err = request_irq(pci_irq_vector(to_pci_dev(dev), i),
|
||||
i < *wr_alloc ?
|
||||
dw_edma_interrupt_write :
|
||||
dw_edma_interrupt_read,
|
||||
IRQF_SHARED, dw->name,
|
||||
&dw->irq[i]);
|
||||
if (err) {
|
||||
dw->nr_irqs = i;
|
||||
return err;
|
||||
}
|
||||
|
||||
get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), i),
|
||||
&dw->irq[i].msi);
|
||||
}
|
||||
|
||||
dw->nr_irqs = i;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int dw_edma_probe(struct dw_edma_chip *chip)
|
||||
{
|
||||
struct device *dev = chip->dev;
|
||||
struct dw_edma *dw = chip->dw;
|
||||
u32 wr_alloc = 0;
|
||||
u32 rd_alloc = 0;
|
||||
int i, err;
|
||||
|
||||
raw_spin_lock_init(&dw->lock);
|
||||
|
||||
/* Find out how many write channels are supported by hardware */
|
||||
dw->wr_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE);
|
||||
if (!dw->wr_ch_cnt)
|
||||
return -EINVAL;
|
||||
|
||||
/* Find out how many read channels are supported by hardware */
|
||||
dw->rd_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ);
|
||||
if (!dw->rd_ch_cnt)
|
||||
return -EINVAL;
|
||||
|
||||
dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
|
||||
dw->wr_ch_cnt, dw->rd_ch_cnt);
|
||||
|
||||
/* Allocate channels */
|
||||
dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt,
|
||||
sizeof(*dw->chan), GFP_KERNEL);
|
||||
if (!dw->chan)
|
||||
return -ENOMEM;
|
||||
|
||||
snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id);
|
||||
|
||||
/* Disable eDMA, only to establish the ideal initial conditions */
|
||||
dw_edma_v0_core_off(dw);
|
||||
|
||||
/* Request IRQs */
|
||||
err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* Setup write channels */
|
||||
err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc);
|
||||
if (err)
|
||||
goto err_irq_free;
|
||||
|
||||
/* Setup read channels */
|
||||
err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc);
|
||||
if (err)
|
||||
goto err_irq_free;
|
||||
|
||||
/* Power management */
|
||||
pm_runtime_enable(dev);
|
||||
|
||||
/* Turn debugfs on */
|
||||
dw_edma_v0_core_debugfs_on(chip);
|
||||
|
||||
return 0;
|
||||
|
||||
err_irq_free:
|
||||
for (i = (dw->nr_irqs - 1); i >= 0; i--)
|
||||
free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]);
|
||||
|
||||
dw->nr_irqs = 0;
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dw_edma_probe);
|
||||
|
||||
int dw_edma_remove(struct dw_edma_chip *chip)
|
||||
{
|
||||
struct dw_edma_chan *chan, *_chan;
|
||||
struct device *dev = chip->dev;
|
||||
struct dw_edma *dw = chip->dw;
|
||||
int i;
|
||||
|
||||
/* Disable eDMA */
|
||||
dw_edma_v0_core_off(dw);
|
||||
|
||||
/* Free irqs */
|
||||
for (i = (dw->nr_irqs - 1); i >= 0; i--)
|
||||
free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]);
|
||||
|
||||
/* Power management */
|
||||
pm_runtime_disable(dev);
|
||||
|
||||
list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
|
||||
vc.chan.device_node) {
|
||||
list_del(&chan->vc.chan.device_node);
|
||||
tasklet_kill(&chan->vc.task);
|
||||
}
|
||||
|
||||
list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
|
||||
vc.chan.device_node) {
|
||||
list_del(&chan->vc.chan.device_node);
|
||||
tasklet_kill(&chan->vc.task);
|
||||
}
|
||||
|
||||
/* Deregister eDMA device */
|
||||
dma_async_device_unregister(&dw->wr_edma);
|
||||
dma_async_device_unregister(&dw->rd_edma);
|
||||
|
||||
/* Turn debugfs off */
|
||||
dw_edma_v0_core_debugfs_off();
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dw_edma_remove);
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver");
|
||||
MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");
|
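
The exported surface of the core is deliberately small: a bus glue driver fills in a struct dw_edma_chip and calls dw_edma_probe()/dw_edma_remove(). A minimal sketch of that handoff (illustrative only; my_glue_probe is hypothetical, and the field names follow the dw-edma-pcie.c probe further down — note the core currently assumes a PCI parent, since dw_edma_irq_request() calls pci_irq_vector(to_pci_dev(dev), ...)):

#include <linux/dma/edma.h>

/* Hypothetical glue probe: hand one eDMA instance over to the core. */
static int my_glue_probe(struct device *dev, struct dw_edma *dw, int irq)
{
	struct dw_edma_chip *chip;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = dev;	/* used for allocations, PM and IRQ requests */
	chip->id = 0;		/* feeds the "dw-edma-core:%d" instance name */
	chip->irq = irq;
	chip->dw = dw;		/* rg/ll/dt regions and nr_irqs filled by the caller */

	return dw_edma_probe(chip);
}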
drivers/dma/dw-edma/dw-edma-core.h (new file, 165 lines)
@@ -0,0 +1,165 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA core driver
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#ifndef _DW_EDMA_CORE_H
#define _DW_EDMA_CORE_H

#include <linux/msi.h>
#include <linux/dma/edma.h>

#include "../virt-dma.h"

#define EDMA_LL_SZ				24

enum dw_edma_dir {
	EDMA_DIR_WRITE = 0,
	EDMA_DIR_READ
};

enum dw_edma_mode {
	EDMA_MODE_LEGACY = 0,
	EDMA_MODE_UNROLL
};

enum dw_edma_request {
	EDMA_REQ_NONE = 0,
	EDMA_REQ_STOP,
	EDMA_REQ_PAUSE
};

enum dw_edma_status {
	EDMA_ST_IDLE = 0,
	EDMA_ST_PAUSE,
	EDMA_ST_BUSY
};

struct dw_edma_chan;
struct dw_edma_chunk;

struct dw_edma_burst {
	struct list_head		list;
	u64				sar;
	u64				dar;
	u32				sz;
};

struct dw_edma_region {
	phys_addr_t			paddr;
	dma_addr_t			vaddr;
	size_t				sz;
};

struct dw_edma_chunk {
	struct list_head		list;
	struct dw_edma_chan		*chan;
	struct dw_edma_burst		*burst;

	u32				bursts_alloc;

	u8				cb;
	struct dw_edma_region		ll_region;	/* Linked list */
};

struct dw_edma_desc {
	struct virt_dma_desc		vd;
	struct dw_edma_chan		*chan;
	struct dw_edma_chunk		*chunk;

	u32				chunks_alloc;

	u32				alloc_sz;
	u32				xfer_sz;
};

struct dw_edma_chan {
	struct virt_dma_chan		vc;
	struct dw_edma_chip		*chip;
	int				id;
	enum dw_edma_dir		dir;

	off_t				ll_off;
	u32				ll_max;

	off_t				dt_off;

	struct msi_msg			msi;

	enum dw_edma_request		request;
	enum dw_edma_status		status;
	u8				configured;

	struct dma_slave_config		config;
};

struct dw_edma_irq {
	struct msi_msg			msi;
	u32				wr_mask;
	u32				rd_mask;
	struct dw_edma			*dw;
};

struct dw_edma {
	char				name[20];

	struct dma_device		wr_edma;
	u16				wr_ch_cnt;

	struct dma_device		rd_edma;
	u16				rd_ch_cnt;

	struct dw_edma_region		rg_region;	/* Registers */
	struct dw_edma_region		ll_region;	/* Linked list */
	struct dw_edma_region		dt_region;	/* Data */

	struct dw_edma_irq		*irq;
	int				nr_irqs;

	u32				version;
	enum dw_edma_mode		mode;

	struct dw_edma_chan		*chan;
	const struct dw_edma_core_ops	*ops;

	raw_spinlock_t			lock;		/* Only for legacy */
};

struct dw_edma_sg {
	struct scatterlist		*sgl;
	unsigned int			len;
};

struct dw_edma_cyclic {
	dma_addr_t			paddr;
	size_t				len;
	size_t				cnt;
};

struct dw_edma_transfer {
	struct dma_chan			*dchan;
	union dw_edma_xfer {
		struct dw_edma_sg	sg;
		struct dw_edma_cyclic	cyclic;
	} xfer;
	enum dma_transfer_direction	direction;
	unsigned long			flags;
	bool				cyclic;
};

static inline
struct dw_edma_chan *vc2dw_edma_chan(struct virt_dma_chan *vc)
{
	return container_of(vc, struct dw_edma_chan, vc);
}

static inline
struct dw_edma_chan *dchan2dw_edma_chan(struct dma_chan *dchan)
{
	return vc2dw_edma_chan(to_virt_chan(dchan));
}

#endif /* _DW_EDMA_CORE_H */
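
The two inline helpers at the bottom of this header are the usual virt-dma recovery path: every dmaengine callback receives a generic struct dma_chan and container_of()'s its way back to the driver-private state. An illustrative, simplified use of the pattern (mirrors what a device_config callback does; example_device_config is hypothetical):

/* Illustrative only: recovering the driver-private channel state. */
static int example_device_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	chan->config = *config;	/* stashed until the next prep call */
	chan->configured = true;

	return 0;
}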
drivers/dma/dw-edma/dw-edma-pcie.c (new file, 229 lines)
@@ -0,0 +1,229 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA PCIe driver
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/dma/edma.h>
#include <linux/pci-epf.h>
#include <linux/msi.h>

#include "dw-edma-core.h"

struct dw_edma_pcie_data {
	/* eDMA registers location */
	enum pci_barno			rg_bar;
	off_t				rg_off;
	size_t				rg_sz;
	/* eDMA memory linked list location */
	enum pci_barno			ll_bar;
	off_t				ll_off;
	size_t				ll_sz;
	/* eDMA memory data location */
	enum pci_barno			dt_bar;
	off_t				dt_off;
	size_t				dt_sz;
	/* Other */
	u32				version;
	enum dw_edma_mode		mode;
	u8				irqs;
};

static const struct dw_edma_pcie_data snps_edda_data = {
	/* eDMA registers location */
	.rg_bar				= BAR_0,
	.rg_off				= 0x00001000,	/*  4 Kbytes */
	.rg_sz				= 0x00002000,	/*  8 Kbytes */
	/* eDMA memory linked list location */
	.ll_bar				= BAR_2,
	.ll_off				= 0x00000000,	/*  0 Kbytes */
	.ll_sz				= 0x00800000,	/*  8 Mbytes */
	/* eDMA memory data location */
	.dt_bar				= BAR_2,
	.dt_off				= 0x00800000,	/*  8 Mbytes */
	.dt_sz				= 0x03800000,	/* 56 Mbytes */
	/* Other */
	.version			= 0,
	.mode				= EDMA_MODE_UNROLL,
	.irqs				= 1,
};

static int dw_edma_pcie_probe(struct pci_dev *pdev,
			      const struct pci_device_id *pid)
{
	const struct dw_edma_pcie_data *pdata = (void *)pid->driver_data;
	struct device *dev = &pdev->dev;
	struct dw_edma_chip *chip;
	int err, nr_irqs;
	struct dw_edma *dw;

	/* Enable PCI device */
	err = pcim_enable_device(pdev);
	if (err) {
		pci_err(pdev, "enabling device failed\n");
		return err;
	}

	/* Mapping PCI BAR regions */
	err = pcim_iomap_regions(pdev, BIT(pdata->rg_bar) |
				       BIT(pdata->ll_bar) |
				       BIT(pdata->dt_bar),
				 pci_name(pdev));
	if (err) {
		pci_err(pdev, "eDMA BAR I/O remapping failed\n");
		return err;
	}

	pci_set_master(pdev);

	/* DMA configuration */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			pci_err(pdev, "consistent DMA mask 64 set failed\n");
			return err;
		}
	} else {
		pci_err(pdev, "DMA mask 64 set failed\n");

		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			pci_err(pdev, "DMA mask 32 set failed\n");
			return err;
		}

		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			pci_err(pdev, "consistent DMA mask 32 set failed\n");
			return err;
		}
	}

	/* Data structure allocation */
	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	/* IRQs allocation */
	nr_irqs = pci_alloc_irq_vectors(pdev, 1, pdata->irqs,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (nr_irqs < 1) {
		pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n",
			nr_irqs);
		return -EPERM;
	}

	/* Data structure initialization */
	chip->dw = dw;
	chip->dev = dev;
	chip->id = pdev->devfn;
	chip->irq = pdev->irq;

	dw->rg_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->rg_bar];
	dw->rg_region.vaddr += pdata->rg_off;
	dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start;
	dw->rg_region.paddr += pdata->rg_off;
	dw->rg_region.sz = pdata->rg_sz;

	dw->ll_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->ll_bar];
	dw->ll_region.vaddr += pdata->ll_off;
	dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start;
	dw->ll_region.paddr += pdata->ll_off;
	dw->ll_region.sz = pdata->ll_sz;

	dw->dt_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->dt_bar];
	dw->dt_region.vaddr += pdata->dt_off;
	dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start;
	dw->dt_region.paddr += pdata->dt_off;
	dw->dt_region.sz = pdata->dt_sz;

	dw->version = pdata->version;
	dw->mode = pdata->mode;
	dw->nr_irqs = nr_irqs;

	/* Debug info */
	pci_dbg(pdev, "Version:\t%u\n", dw->version);

	pci_dbg(pdev, "Mode:\t%s\n",
		dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll");

	pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
		pdata->rg_bar, pdata->rg_off, pdata->rg_sz,
		&dw->rg_region.vaddr, &dw->rg_region.paddr);

	pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
		pdata->ll_bar, pdata->ll_off, pdata->ll_sz,
		&dw->ll_region.vaddr, &dw->ll_region.paddr);

	pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
		pdata->dt_bar, pdata->dt_off, pdata->dt_sz,
		&dw->dt_region.vaddr, &dw->dt_region.paddr);

	pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);

	/* Validating if PCI interrupts were enabled */
	if (!pci_dev_msi_enabled(pdev)) {
		pci_err(pdev, "enable interrupt failed\n");
		return -EPERM;
	}

	dw->irq = devm_kcalloc(dev, nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
	if (!dw->irq)
		return -ENOMEM;

	/* Starting eDMA driver */
	err = dw_edma_probe(chip);
	if (err) {
		pci_err(pdev, "eDMA probe failed\n");
		return err;
	}

	/* Saving data structure reference */
	pci_set_drvdata(pdev, chip);

	return 0;
}

static void dw_edma_pcie_remove(struct pci_dev *pdev)
{
	struct dw_edma_chip *chip = pci_get_drvdata(pdev);
	int err;

	/* Stopping eDMA driver */
	err = dw_edma_remove(chip);
	if (err)
		pci_warn(pdev, "can't remove device properly: %d\n", err);

	/* Freeing IRQs */
	pci_free_irq_vectors(pdev);
}

static const struct pci_device_id dw_edma_pcie_id_table[] = {
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, &snps_edda_data) },
	{ }
};
MODULE_DEVICE_TABLE(pci, dw_edma_pcie_id_table);

static struct pci_driver dw_edma_pcie_driver = {
	.name		= "dw-edma-pcie",
	.id_table	= dw_edma_pcie_id_table,
	.probe		= dw_edma_pcie_probe,
	.remove		= dw_edma_pcie_remove,
};

module_pci_driver(dw_edma_pcie_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare eDMA PCIe driver");
MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");
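
The mask setup in this probe is the classic try-64-then-fall-back-to-32 sequence, spelled out with the legacy pci_set_dma_mask()/pci_set_consistent_dma_mask() pair. For comparison, the same logic using the combined streaming+coherent helper (a sketch, not the patch's code):

/* Equivalent setup via dma_set_mask_and_coherent(). */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;
}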
drivers/dma/dw-edma/dw-edma-v0-core.c (new file, 354 lines)
@@ -0,0 +1,354 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA v0 core
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/bitfield.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "dw-edma-v0-regs.h"
#include "dw-edma-v0-debugfs.h"

enum dw_edma_control {
	DW_EDMA_V0_CB		= BIT(0),
	DW_EDMA_V0_TCB		= BIT(1),
	DW_EDMA_V0_LLP		= BIT(2),
	DW_EDMA_V0_LIE		= BIT(3),
	DW_EDMA_V0_RIE		= BIT(4),
	DW_EDMA_V0_CCS		= BIT(8),
	DW_EDMA_V0_LLE		= BIT(9),
};

static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
	return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr;
}

#define SET(dw, name, value)				\
	writel(value, &(__dw_regs(dw)->name))

#define GET(dw, name)					\
	readl(&(__dw_regs(dw)->name))

#define SET_RW(dw, dir, name, value)			\
	do {						\
		if ((dir) == EDMA_DIR_WRITE)		\
			SET(dw, wr_##name, value);	\
		else					\
			SET(dw, rd_##name, value);	\
	} while (0)

#define GET_RW(dw, dir, name)				\
	((dir) == EDMA_DIR_WRITE			\
	  ? GET(dw, wr_##name)				\
	  : GET(dw, rd_##name))

#define SET_BOTH(dw, name, value)			\
	do {						\
		SET(dw, wr_##name, value);		\
		SET(dw, rd_##name, value);		\
	} while (0)

static inline struct dw_edma_v0_ch_regs __iomem *
__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
{
	if (dw->mode == EDMA_MODE_LEGACY)
		return &(__dw_regs(dw)->type.legacy.ch);

	if (dir == EDMA_DIR_WRITE)
		return &__dw_regs(dw)->type.unroll.ch[ch].wr;

	return &__dw_regs(dw)->type.unroll.ch[ch].rd;
}

static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			     u32 value, void __iomem *addr)
{
	if (dw->mode == EDMA_MODE_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		writel(value, addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		writel(value, addr);
	}
}

static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			   const void __iomem *addr)
{
	u32 value;

	if (dw->mode == EDMA_MODE_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		value = readl(addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		value = readl(addr);
	}

	return value;
}

#define SET_CH(dw, dir, ch, name, value) \
	writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))

#define GET_CH(dw, dir, ch, name) \
	readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))

#define SET_LL(ll, value) \
	writel(value, ll)

/* eDMA management callbacks */
void dw_edma_v0_core_off(struct dw_edma *dw)
{
	SET_BOTH(dw, int_mask, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
	SET_BOTH(dw, int_clear, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
	SET_BOTH(dw, engine_en, 0);
}

u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
{
	u32 num_ch;

	if (dir == EDMA_DIR_WRITE)
		num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK, GET(dw, ctrl));
	else
		num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK, GET(dw, ctrl));

	if (num_ch > EDMA_V0_MAX_NR_CH)
		num_ch = EDMA_V0_MAX_NR_CH;

	return (u16)num_ch;
}

enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->chip->dw;
	u32 tmp;

	tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK,
			GET_CH(dw, chan->dir, chan->id, ch_control1));

	if (tmp == 1)
		return DMA_IN_PROGRESS;
	else if (tmp == 3)
		return DMA_COMPLETE;
	else
		return DMA_ERROR;
}

void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->chip->dw;

	SET_RW(dw, chan->dir, int_clear,
	       FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
}

void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->chip->dw;

	SET_RW(dw, chan->dir, int_clear,
	       FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
}

u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
	return FIELD_GET(EDMA_V0_DONE_INT_MASK, GET_RW(dw, dir, int_status));
}

u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
	return FIELD_GET(EDMA_V0_ABORT_INT_MASK, GET_RW(dw, dir, int_status));
}

static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child;
	struct dw_edma_v0_lli *lli;
	struct dw_edma_v0_llp *llp;
	u32 control = 0, i = 0;
	u64 sar, dar, addr;
	int j;

	lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr;

	if (chunk->cb)
		control = DW_EDMA_V0_CB;

	j = chunk->bursts_alloc;
	list_for_each_entry(child, &chunk->burst->list, list) {
		j--;
		if (!j)
			control |= (DW_EDMA_V0_LIE | DW_EDMA_V0_RIE);

		/* Channel control */
		SET_LL(&lli[i].control, control);
		/* Transfer size */
		SET_LL(&lli[i].transfer_size, child->sz);
		/* SAR - low, high */
		sar = cpu_to_le64(child->sar);
		SET_LL(&lli[i].sar_low, lower_32_bits(sar));
		SET_LL(&lli[i].sar_high, upper_32_bits(sar));
		/* DAR - low, high */
		dar = cpu_to_le64(child->dar);
		SET_LL(&lli[i].dar_low, lower_32_bits(dar));
		SET_LL(&lli[i].dar_high, upper_32_bits(dar));
		i++;
	}

	llp = (struct dw_edma_v0_llp *)&lli[i];
	control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
	if (!chunk->cb)
		control |= DW_EDMA_V0_CB;

	/* Channel control */
	SET_LL(&llp->control, control);
	/* Linked list - low, high */
	addr = cpu_to_le64(chunk->ll_region.paddr);
	SET_LL(&llp->llp_low, lower_32_bits(addr));
	SET_LL(&llp->llp_high, upper_32_bits(addr));
}

void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
	struct dw_edma_chan *chan = chunk->chan;
	struct dw_edma *dw = chan->chip->dw;
	u32 tmp;
	u64 llp;

	dw_edma_v0_core_write_chunk(chunk);

	if (first) {
		/* Enable engine */
		SET_RW(dw, chan->dir, engine_en, BIT(0));
		/* Interrupt unmask - done, abort */
		tmp = GET_RW(dw, chan->dir, int_mask);
		tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id));
		tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id));
		SET_RW(dw, chan->dir, int_mask, tmp);
		/* Linked list error */
		tmp = GET_RW(dw, chan->dir, linked_list_err_en);
		tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id));
		SET_RW(dw, chan->dir, linked_list_err_en, tmp);
		/* Channel control */
		SET_CH(dw, chan->dir, chan->id, ch_control1,
		       (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
		/* Linked list - low, high */
		llp = cpu_to_le64(chunk->ll_region.paddr);
		SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp));
		SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp));
	}
	/* Doorbell */
	SET_RW(dw, chan->dir, doorbell,
	       FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
}

int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->chip->dw;
	u32 tmp = 0;

	/* MSI done addr - low, high */
	SET_RW(dw, chan->dir, done_imwr_low, chan->msi.address_lo);
	SET_RW(dw, chan->dir, done_imwr_high, chan->msi.address_hi);
	/* MSI abort addr - low, high */
	SET_RW(dw, chan->dir, abort_imwr_low, chan->msi.address_lo);
	SET_RW(dw, chan->dir, abort_imwr_high, chan->msi.address_hi);
	/* MSI data - low, high */
	switch (chan->id) {
	case 0:
	case 1:
		tmp = GET_RW(dw, chan->dir, ch01_imwr_data);
		break;

	case 2:
	case 3:
		tmp = GET_RW(dw, chan->dir, ch23_imwr_data);
		break;

	case 4:
	case 5:
		tmp = GET_RW(dw, chan->dir, ch45_imwr_data);
		break;

	case 6:
	case 7:
		tmp = GET_RW(dw, chan->dir, ch67_imwr_data);
		break;
	}

	if (chan->id & BIT(0)) {
		/* Channel odd {1, 3, 5, 7} */
		tmp &= EDMA_V0_CH_EVEN_MSI_DATA_MASK;
		tmp |= FIELD_PREP(EDMA_V0_CH_ODD_MSI_DATA_MASK,
				  chan->msi.data);
	} else {
		/* Channel even {0, 2, 4, 6} */
		tmp &= EDMA_V0_CH_ODD_MSI_DATA_MASK;
		tmp |= FIELD_PREP(EDMA_V0_CH_EVEN_MSI_DATA_MASK,
				  chan->msi.data);
	}

	switch (chan->id) {
	case 0:
	case 1:
		SET_RW(dw, chan->dir, ch01_imwr_data, tmp);
		break;

	case 2:
	case 3:
		SET_RW(dw, chan->dir, ch23_imwr_data, tmp);
		break;

	case 4:
	case 5:
		SET_RW(dw, chan->dir, ch45_imwr_data, tmp);
		break;

	case 6:
	case 7:
		SET_RW(dw, chan->dir, ch67_imwr_data, tmp);
		break;
	}

	return 0;
}

/* eDMA debugfs callbacks */
void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip)
{
	dw_edma_v0_debugfs_on(chip);
}

void dw_edma_v0_core_debugfs_off(void)
{
	dw_edma_v0_debugfs_off();
}
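
Every 64-bit bus address the core programs — SAR, DAR and the linked-list pointer — is written to the hardware as two 32-bit halves, as in dw_edma_v0_core_start() above. A worked example of the split (value is illustrative):

u64 llp = 0x0000001234567890ULL;	/* example linked-list base address */
u32 lo  = lower_32_bits(llp);		/* 0x34567890 -> llp_low  */
u32 hi  = upper_32_bits(llp);		/* 0x00000012 -> llp_high */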
drivers/dma/dw-edma/dw-edma-v0-core.h (new file, 28 lines)
@@ -0,0 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA v0 core
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#ifndef _DW_EDMA_V0_CORE_H
#define _DW_EDMA_V0_CORE_H

#include <linux/dma/edma.h>

/* eDMA management callbacks */
void dw_edma_v0_core_off(struct dw_edma *chan);
u16 dw_edma_v0_core_ch_count(struct dw_edma *chan, enum dw_edma_dir dir);
enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan);
void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan);
void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan);
u32 dw_edma_v0_core_status_done_int(struct dw_edma *chan, enum dw_edma_dir dir);
u32 dw_edma_v0_core_status_abort_int(struct dw_edma *chan, enum dw_edma_dir dir);
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first);
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan);
/* eDMA debug fs callbacks */
void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip);
void dw_edma_v0_core_debugfs_off(void);

#endif /* _DW_EDMA_V0_CORE_H */
drivers/dma/dw-edma/dw-edma-v0-debugfs.c (new file, 310 lines)
@@ -0,0 +1,310 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA v0 core
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/debugfs.h>
#include <linux/bitfield.h>

#include "dw-edma-v0-debugfs.h"
#include "dw-edma-v0-regs.h"
#include "dw-edma-core.h"

#define REGS_ADDR(name) \
	((dma_addr_t *)&regs->name)
#define REGISTER(name) \
	{ #name, REGS_ADDR(name) }

#define WR_REGISTER(name) \
	{ #name, REGS_ADDR(wr_##name) }
#define RD_REGISTER(name) \
	{ #name, REGS_ADDR(rd_##name) }

#define WR_REGISTER_LEGACY(name) \
	{ #name, REGS_ADDR(type.legacy.wr_##name) }
#define RD_REGISTER_LEGACY(name) \
	{ #name, REGS_ADDR(type.legacy.rd_##name) }

#define WR_REGISTER_UNROLL(name) \
	{ #name, REGS_ADDR(type.unroll.wr_##name) }
#define RD_REGISTER_UNROLL(name) \
	{ #name, REGS_ADDR(type.unroll.rd_##name) }

#define WRITE_STR			"write"
#define READ_STR			"read"
#define CHANNEL_STR			"channel"
#define REGISTERS_STR			"registers"

static struct dentry			*base_dir;
static struct dw_edma			*dw;
static struct dw_edma_v0_regs		*regs;

static struct {
	void				*start;
	void				*end;
} lim[2][EDMA_V0_MAX_NR_CH];

struct debugfs_entries {
	char				name[24];
	dma_addr_t			*reg;
};

static int dw_edma_debugfs_u32_get(void *data, u64 *val)
{
	if (dw->mode == EDMA_MODE_LEGACY &&
	    data >= (void *)&regs->type.legacy.ch) {
		void *ptr = (void *)&regs->type.legacy.ch;
		u32 viewport_sel = 0;
		unsigned long flags;
		u16 ch;

		for (ch = 0; ch < dw->wr_ch_cnt; ch++)
			if (lim[0][ch].start >= data && data < lim[0][ch].end) {
				ptr += (data - lim[0][ch].start);
				goto legacy_sel_wr;
			}

		for (ch = 0; ch < dw->rd_ch_cnt; ch++)
			if (lim[1][ch].start >= data && data < lim[1][ch].end) {
				ptr += (data - lim[1][ch].start);
				goto legacy_sel_rd;
			}

		return 0;
legacy_sel_rd:
		viewport_sel = BIT(31);
legacy_sel_wr:
		viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);

		raw_spin_lock_irqsave(&dw->lock, flags);

		writel(viewport_sel, &regs->type.legacy.viewport_sel);
		*val = readl(ptr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		*val = readl(data);
	}

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_edma_debugfs_u32_get, NULL, "0x%08llx\n");

static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[],
				       int nr_entries, struct dentry *dir)
{
	int i;

	for (i = 0; i < nr_entries; i++) {
		if (!debugfs_create_file_unsafe(entries[i].name, 0444, dir,
						entries[i].reg, &fops_x32))
			break;
	}
}

static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs *regs,
				    struct dentry *dir)
{
	int nr_entries;
	const struct debugfs_entries debugfs_regs[] = {
		REGISTER(ch_control1),
		REGISTER(ch_control2),
		REGISTER(transfer_size),
		REGISTER(sar_low),
		REGISTER(sar_high),
		REGISTER(dar_low),
		REGISTER(dar_high),
		REGISTER(llp_low),
		REGISTER(llp_high),
	};

	nr_entries = ARRAY_SIZE(debugfs_regs);
	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, dir);
}

static void dw_edma_debugfs_regs_wr(struct dentry *dir)
{
	const struct debugfs_entries debugfs_regs[] = {
		/* eDMA global registers */
		WR_REGISTER(engine_en),
		WR_REGISTER(doorbell),
		WR_REGISTER(ch_arb_weight_low),
		WR_REGISTER(ch_arb_weight_high),
		/* eDMA interrupts registers */
		WR_REGISTER(int_status),
		WR_REGISTER(int_mask),
		WR_REGISTER(int_clear),
		WR_REGISTER(err_status),
		WR_REGISTER(done_imwr_low),
		WR_REGISTER(done_imwr_high),
		WR_REGISTER(abort_imwr_low),
		WR_REGISTER(abort_imwr_high),
		WR_REGISTER(ch01_imwr_data),
		WR_REGISTER(ch23_imwr_data),
		WR_REGISTER(ch45_imwr_data),
		WR_REGISTER(ch67_imwr_data),
		WR_REGISTER(linked_list_err_en),
	};
	const struct debugfs_entries debugfs_unroll_regs[] = {
		/* eDMA channel context grouping */
		WR_REGISTER_UNROLL(engine_chgroup),
		WR_REGISTER_UNROLL(engine_hshake_cnt_low),
		WR_REGISTER_UNROLL(engine_hshake_cnt_high),
		WR_REGISTER_UNROLL(ch0_pwr_en),
		WR_REGISTER_UNROLL(ch1_pwr_en),
		WR_REGISTER_UNROLL(ch2_pwr_en),
		WR_REGISTER_UNROLL(ch3_pwr_en),
		WR_REGISTER_UNROLL(ch4_pwr_en),
		WR_REGISTER_UNROLL(ch5_pwr_en),
		WR_REGISTER_UNROLL(ch6_pwr_en),
		WR_REGISTER_UNROLL(ch7_pwr_en),
	};
	struct dentry *regs_dir, *ch_dir;
	int nr_entries, i;
	char name[16];

	regs_dir = debugfs_create_dir(WRITE_STR, dir);
	if (!regs_dir)
		return;

	nr_entries = ARRAY_SIZE(debugfs_regs);
	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);

	if (dw->mode == EDMA_MODE_UNROLL) {
		nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
		dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
					   regs_dir);
	}

	for (i = 0; i < dw->wr_ch_cnt; i++) {
		snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);

		ch_dir = debugfs_create_dir(name, regs_dir);
		if (!ch_dir)
			return;

		dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].wr, ch_dir);

		lim[0][i].start = &regs->type.unroll.ch[i].wr;
		lim[0][i].end = &regs->type.unroll.ch[i].padding_1[0];
	}
}

static void dw_edma_debugfs_regs_rd(struct dentry *dir)
{
	const struct debugfs_entries debugfs_regs[] = {
		/* eDMA global registers */
		RD_REGISTER(engine_en),
		RD_REGISTER(doorbell),
		RD_REGISTER(ch_arb_weight_low),
		RD_REGISTER(ch_arb_weight_high),
		/* eDMA interrupts registers */
		RD_REGISTER(int_status),
		RD_REGISTER(int_mask),
		RD_REGISTER(int_clear),
		RD_REGISTER(err_status_low),
		RD_REGISTER(err_status_high),
		RD_REGISTER(linked_list_err_en),
		RD_REGISTER(done_imwr_low),
		RD_REGISTER(done_imwr_high),
		RD_REGISTER(abort_imwr_low),
		RD_REGISTER(abort_imwr_high),
		RD_REGISTER(ch01_imwr_data),
		RD_REGISTER(ch23_imwr_data),
		RD_REGISTER(ch45_imwr_data),
		RD_REGISTER(ch67_imwr_data),
	};
	const struct debugfs_entries debugfs_unroll_regs[] = {
		/* eDMA channel context grouping */
		RD_REGISTER_UNROLL(engine_chgroup),
		RD_REGISTER_UNROLL(engine_hshake_cnt_low),
		RD_REGISTER_UNROLL(engine_hshake_cnt_high),
		RD_REGISTER_UNROLL(ch0_pwr_en),
		RD_REGISTER_UNROLL(ch1_pwr_en),
		RD_REGISTER_UNROLL(ch2_pwr_en),
		RD_REGISTER_UNROLL(ch3_pwr_en),
		RD_REGISTER_UNROLL(ch4_pwr_en),
		RD_REGISTER_UNROLL(ch5_pwr_en),
		RD_REGISTER_UNROLL(ch6_pwr_en),
		RD_REGISTER_UNROLL(ch7_pwr_en),
	};
	struct dentry *regs_dir, *ch_dir;
	int nr_entries, i;
	char name[16];

	regs_dir = debugfs_create_dir(READ_STR, dir);
	if (!regs_dir)
		return;

	nr_entries = ARRAY_SIZE(debugfs_regs);
	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);

	if (dw->mode == EDMA_MODE_UNROLL) {
		nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
		dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
					   regs_dir);
	}

	for (i = 0; i < dw->rd_ch_cnt; i++) {
		snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);

		ch_dir = debugfs_create_dir(name, regs_dir);
		if (!ch_dir)
			return;

		dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].rd, ch_dir);

		lim[1][i].start = &regs->type.unroll.ch[i].rd;
		lim[1][i].end = &regs->type.unroll.ch[i].padding_2[0];
	}
}

static void dw_edma_debugfs_regs(void)
{
	const struct debugfs_entries debugfs_regs[] = {
		REGISTER(ctrl_data_arb_prior),
		REGISTER(ctrl),
	};
	struct dentry *regs_dir;
	int nr_entries;

	regs_dir = debugfs_create_dir(REGISTERS_STR, base_dir);
	if (!regs_dir)
		return;

	nr_entries = ARRAY_SIZE(debugfs_regs);
	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);

	dw_edma_debugfs_regs_wr(regs_dir);
	dw_edma_debugfs_regs_rd(regs_dir);
}

void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
{
	dw = chip->dw;
	if (!dw)
		return;

	regs = (struct dw_edma_v0_regs *)dw->rg_region.vaddr;
	if (!regs)
		return;

	base_dir = debugfs_create_dir(dw->name, 0);
	if (!base_dir)
		return;

	debugfs_create_u32("version", 0444, base_dir, &dw->version);
	debugfs_create_u32("mode", 0444, base_dir, &dw->mode);
	debugfs_create_u16("wr_ch_cnt", 0444, base_dir, &dw->wr_ch_cnt);
	debugfs_create_u16("rd_ch_cnt", 0444, base_dir, &dw->rd_ch_cnt);

	dw_edma_debugfs_regs();
}

void dw_edma_v0_debugfs_off(void)
{
	debugfs_remove_recursive(base_dir);
}
drivers/dma/dw-edma/dw-edma-v0-debugfs.h (new file, 27 lines)
@@ -0,0 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA v0 core
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#ifndef _DW_EDMA_V0_DEBUG_FS_H
#define _DW_EDMA_V0_DEBUG_FS_H

#include <linux/dma/edma.h>

#ifdef CONFIG_DEBUG_FS
void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip);
void dw_edma_v0_debugfs_off(void);
#else
static inline void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
{
}

static inline void dw_edma_v0_debugfs_off(void)
{
}
#endif /* CONFIG_DEBUG_FS */

#endif /* _DW_EDMA_V0_DEBUG_FS_H */
drivers/dma/dw-edma/dw-edma-v0-regs.h (new file, 158 lines)
@@ -0,0 +1,158 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA v0 core
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#ifndef _DW_EDMA_V0_REGS_H
#define _DW_EDMA_V0_REGS_H

#include <linux/dmaengine.h>

#define EDMA_V0_MAX_NR_CH			8
#define EDMA_V0_VIEWPORT_MASK			GENMASK(2, 0)
#define EDMA_V0_DONE_INT_MASK			GENMASK(7, 0)
#define EDMA_V0_ABORT_INT_MASK			GENMASK(23, 16)
#define EDMA_V0_WRITE_CH_COUNT_MASK		GENMASK(3, 0)
#define EDMA_V0_READ_CH_COUNT_MASK		GENMASK(19, 16)
#define EDMA_V0_CH_STATUS_MASK			GENMASK(6, 5)
#define EDMA_V0_DOORBELL_CH_MASK		GENMASK(2, 0)
#define EDMA_V0_LINKED_LIST_ERR_MASK		GENMASK(7, 0)

#define EDMA_V0_CH_ODD_MSI_DATA_MASK		GENMASK(31, 16)
#define EDMA_V0_CH_EVEN_MSI_DATA_MASK		GENMASK(15, 0)

struct dw_edma_v0_ch_regs {
	u32 ch_control1;			/* 0x000 */
	u32 ch_control2;			/* 0x004 */
	u32 transfer_size;			/* 0x008 */
	u32 sar_low;				/* 0x00c */
	u32 sar_high;				/* 0x010 */
	u32 dar_low;				/* 0x014 */
	u32 dar_high;				/* 0x018 */
	u32 llp_low;				/* 0x01c */
	u32 llp_high;				/* 0x020 */
};

struct dw_edma_v0_ch {
	struct dw_edma_v0_ch_regs wr;		/* 0x200 */
	u32 padding_1[55];			/* [0x224..0x2fc] */
	struct dw_edma_v0_ch_regs rd;		/* 0x300 */
	u32 padding_2[55];			/* [0x324..0x3fc] */
};

struct dw_edma_v0_unroll {
	u32 padding_1;				/* 0x0f8 */
	u32 wr_engine_chgroup;			/* 0x100 */
	u32 rd_engine_chgroup;			/* 0x104 */
	u32 wr_engine_hshake_cnt_low;		/* 0x108 */
	u32 wr_engine_hshake_cnt_high;		/* 0x10c */
	u32 padding_2[2];			/* [0x110..0x114] */
	u32 rd_engine_hshake_cnt_low;		/* 0x118 */
	u32 rd_engine_hshake_cnt_high;		/* 0x11c */
	u32 padding_3[2];			/* [0x120..0x124] */
	u32 wr_ch0_pwr_en;			/* 0x128 */
	u32 wr_ch1_pwr_en;			/* 0x12c */
	u32 wr_ch2_pwr_en;			/* 0x130 */
	u32 wr_ch3_pwr_en;			/* 0x134 */
	u32 wr_ch4_pwr_en;			/* 0x138 */
	u32 wr_ch5_pwr_en;			/* 0x13c */
	u32 wr_ch6_pwr_en;			/* 0x140 */
	u32 wr_ch7_pwr_en;			/* 0x144 */
	u32 padding_4[8];			/* [0x148..0x164] */
	u32 rd_ch0_pwr_en;			/* 0x168 */
	u32 rd_ch1_pwr_en;			/* 0x16c */
	u32 rd_ch2_pwr_en;			/* 0x170 */
	u32 rd_ch3_pwr_en;			/* 0x174 */
	u32 rd_ch4_pwr_en;			/* 0x178 */
	u32 rd_ch5_pwr_en;			/* 0x17c */
	u32 rd_ch6_pwr_en;			/* 0x180 */
	u32 rd_ch7_pwr_en;			/* 0x184 */
	u32 padding_5[30];			/* [0x188..0x1fc] */
	struct dw_edma_v0_ch ch[EDMA_V0_MAX_NR_CH];	/* [0x200..0x1120] */
};

struct dw_edma_v0_legacy {
	u32 viewport_sel;			/* 0x0f8 */
	struct dw_edma_v0_ch_regs ch;		/* [0x100..0x120] */
};

struct dw_edma_v0_regs {
	/* eDMA global registers */
	u32 ctrl_data_arb_prior;		/* 0x000 */
	u32 padding_1;				/* 0x004 */
	u32 ctrl;				/* 0x008 */
	u32 wr_engine_en;			/* 0x00c */
	u32 wr_doorbell;			/* 0x010 */
	u32 padding_2;				/* 0x014 */
	u32 wr_ch_arb_weight_low;		/* 0x018 */
	u32 wr_ch_arb_weight_high;		/* 0x01c */
	u32 padding_3[3];			/* [0x020..0x028] */
	u32 rd_engine_en;			/* 0x02c */
	u32 rd_doorbell;			/* 0x030 */
	u32 padding_4;				/* 0x034 */
	u32 rd_ch_arb_weight_low;		/* 0x038 */
	u32 rd_ch_arb_weight_high;		/* 0x03c */
	u32 padding_5[3];			/* [0x040..0x048] */
	/* eDMA interrupts registers */
	u32 wr_int_status;			/* 0x04c */
	u32 padding_6;				/* 0x050 */
	u32 wr_int_mask;			/* 0x054 */
	u32 wr_int_clear;			/* 0x058 */
	u32 wr_err_status;			/* 0x05c */
	u32 wr_done_imwr_low;			/* 0x060 */
	u32 wr_done_imwr_high;			/* 0x064 */
	u32 wr_abort_imwr_low;			/* 0x068 */
	u32 wr_abort_imwr_high;			/* 0x06c */
	u32 wr_ch01_imwr_data;			/* 0x070 */
	u32 wr_ch23_imwr_data;			/* 0x074 */
	u32 wr_ch45_imwr_data;			/* 0x078 */
	u32 wr_ch67_imwr_data;			/* 0x07c */
	u32 padding_7[4];			/* [0x080..0x08c] */
	u32 wr_linked_list_err_en;		/* 0x090 */
	u32 padding_8[3];			/* [0x094..0x09c] */
	u32 rd_int_status;			/* 0x0a0 */
	u32 padding_9;				/* 0x0a4 */
	u32 rd_int_mask;			/* 0x0a8 */
	u32 rd_int_clear;			/* 0x0ac */
	u32 padding_10;				/* 0x0b0 */
	u32 rd_err_status_low;			/* 0x0b4 */
	u32 rd_err_status_high;			/* 0x0b8 */
	u32 padding_11[2];			/* [0x0bc..0x0c0] */
	u32 rd_linked_list_err_en;		/* 0x0c4 */
	u32 padding_12;				/* 0x0c8 */
	u32 rd_done_imwr_low;			/* 0x0cc */
	u32 rd_done_imwr_high;			/* 0x0d0 */
	u32 rd_abort_imwr_low;			/* 0x0d4 */
	u32 rd_abort_imwr_high;			/* 0x0d8 */
	u32 rd_ch01_imwr_data;			/* 0x0dc */
	u32 rd_ch23_imwr_data;			/* 0x0e0 */
	u32 rd_ch45_imwr_data;			/* 0x0e4 */
	u32 rd_ch67_imwr_data;			/* 0x0e8 */
	u32 padding_13[4];			/* [0x0ec..0x0f8] */
	/* eDMA channel context grouping */
	union dw_edma_v0_type {
		struct dw_edma_v0_legacy legacy;	/* [0x0f8..0x120] */
		struct dw_edma_v0_unroll unroll;	/* [0x0f8..0x1120] */
	} type;
};

struct dw_edma_v0_lli {
	u32 control;
	u32 transfer_size;
	u32 sar_low;
	u32 sar_high;
	u32 dar_low;
	u32 dar_high;
};

struct dw_edma_v0_llp {
	u32 control;
	u32 reserved;
	u32 llp_low;
	u32 llp_high;
};

#endif /* _DW_EDMA_V0_REGS_H */
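
The GENMASK()/FIELD_GET() pairs defined at the top of this header are what dw_edma_v0_core_ch_count() relies on to decode the ctrl register. A worked decode with an illustrative readout value:

u32 ctrl = 0x00040008;	/* hypothetical ctrl register readout */
u16 wr_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK, ctrl);	/* bits 3:0   -> 8 */
u16 rd_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK, ctrl);	/* bits 19:16 -> 4 */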
drivers/dma/dw/pci.c
@@ -15,10 +15,13 @@
 struct dw_dma_pci_data {
 	const struct dw_dma_platform_data *pdata;
 	int (*probe)(struct dw_dma_chip *chip);
+	int (*remove)(struct dw_dma_chip *chip);
+	struct dw_dma_chip *chip;
 };
 
 static const struct dw_dma_pci_data dw_pci_data = {
 	.probe = dw_dma_probe,
+	.remove = dw_dma_remove,
 };
 
 static const struct dw_dma_platform_data idma32_pdata = {
@@ -34,11 +37,13 @@ static const struct dw_dma_platform_data idma32_pdata = {
 static const struct dw_dma_pci_data idma32_pci_data = {
 	.pdata = &idma32_pdata,
 	.probe = idma32_dma_probe,
+	.remove = idma32_dma_remove,
 };
 
 static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
 {
-	const struct dw_dma_pci_data *data = (void *)pid->driver_data;
+	const struct dw_dma_pci_data *drv_data = (void *)pid->driver_data;
+	struct dw_dma_pci_data *data;
 	struct dw_dma_chip *chip;
 	int ret;
 
@@ -63,6 +68,10 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
 	if (ret)
 		return ret;
 
+	data = devm_kmemdup(&pdev->dev, drv_data, sizeof(*drv_data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
 	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
 	if (!chip)
 		return -ENOMEM;
@@ -73,21 +82,24 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
 	chip->irq = pdev->irq;
 	chip->pdata = data->pdata;
 
+	data->chip = chip;
+
 	ret = data->probe(chip);
 	if (ret)
 		return ret;
 
-	pci_set_drvdata(pdev, chip);
+	pci_set_drvdata(pdev, data);
 
 	return 0;
 }
 
 static void dw_pci_remove(struct pci_dev *pdev)
 {
-	struct dw_dma_chip *chip = pci_get_drvdata(pdev);
+	struct dw_dma_pci_data *data = pci_get_drvdata(pdev);
+	struct dw_dma_chip *chip = data->chip;
 	int ret;
 
-	ret = dw_dma_remove(chip);
+	ret = data->remove(chip);
 	if (ret)
 		dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret);
 }
@@ -96,16 +108,16 @@ static void dw_pci_remove(struct pci_dev *pdev)
 
 static int dw_pci_suspend_late(struct device *dev)
 {
-	struct pci_dev *pci = to_pci_dev(dev);
-	struct dw_dma_chip *chip = pci_get_drvdata(pci);
+	struct dw_dma_pci_data *data = dev_get_drvdata(dev);
+	struct dw_dma_chip *chip = data->chip;
 
 	return do_dw_dma_disable(chip);
 };
 
 static int dw_pci_resume_early(struct device *dev)
 {
-	struct pci_dev *pci = to_pci_dev(dev);
-	struct dw_dma_chip *chip = pci_get_drvdata(pci);
+	struct dw_dma_pci_data *data = dev_get_drvdata(dev);
+	struct dw_dma_chip *chip = data->chip;
 
 	return do_dw_dma_enable(chip);
 };
@@ -131,6 +143,11 @@ static const struct pci_device_id dw_pci_id_table[] = {
 	{ PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_data },
 	{ PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_data },
 
+	/* Elkhart Lake iDMA 32-bit (OSE DMA) */
+	{ PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&idma32_pci_data },
+	{ PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&idma32_pci_data },
+	{ PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&idma32_pci_data },
+
 	/* Haswell */
 	{ PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_data },
 
drivers/dma/fsl-edma-common.c
@@ -47,7 +47,7 @@ static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
 	struct edma_regs *regs = &fsl_chan->edma->regs;
 	u32 ch = fsl_chan->vchan.chan.chan_id;
 
-	if (fsl_chan->edma->version == v1) {
+	if (fsl_chan->edma->drvdata->version == v1) {
 		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
 		edma_writeb(fsl_chan->edma, ch, regs->serq);
 	} else {
@@ -64,7 +64,7 @@ void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
 	struct edma_regs *regs = &fsl_chan->edma->regs;
 	u32 ch = fsl_chan->vchan.chan.chan_id;
 
-	if (fsl_chan->edma->version == v1) {
+	if (fsl_chan->edma->drvdata->version == v1) {
 		edma_writeb(fsl_chan->edma, ch, regs->cerq);
 		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
 	} else {
@@ -77,22 +77,33 @@ void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
 }
 EXPORT_SYMBOL_GPL(fsl_edma_disable_request);
 
+static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
+			   u32 off, u32 slot, bool enable)
+{
+	u8 val8;
+
+	if (enable)
+		val8 = EDMAMUX_CHCFG_ENBL | slot;
+	else
+		val8 = EDMAMUX_CHCFG_DIS;
+
+	iowrite8(val8, addr + off);
+}
+
 void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
 		       unsigned int slot, bool enable)
 {
 	u32 ch = fsl_chan->vchan.chan.chan_id;
 	void __iomem *muxaddr;
 	unsigned int chans_per_mux, ch_off;
+	u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
 
-	chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
+	chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
 	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
 	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
 	slot = EDMAMUX_CHCFG_SOURCE(slot);
 
-	if (enable)
-		iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
-	else
-		iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
+	mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
 }
 EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
 
@@ -647,28 +658,28 @@ void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
 	edma->regs.erql = edma->membase + EDMA_ERQ;
 	edma->regs.eeil = edma->membase + EDMA_EEI;
 
-	edma->regs.serq = edma->membase + ((edma->version == v1) ?
-			EDMA_SERQ : EDMA64_SERQ);
-	edma->regs.cerq = edma->membase + ((edma->version == v1) ?
-			EDMA_CERQ : EDMA64_CERQ);
-	edma->regs.seei = edma->membase + ((edma->version == v1) ?
-			EDMA_SEEI : EDMA64_SEEI);
-	edma->regs.ceei = edma->membase + ((edma->version == v1) ?
-			EDMA_CEEI : EDMA64_CEEI);
-	edma->regs.cint = edma->membase + ((edma->version == v1) ?
-			EDMA_CINT : EDMA64_CINT);
-	edma->regs.cerr = edma->membase + ((edma->version == v1) ?
-			EDMA_CERR : EDMA64_CERR);
-	edma->regs.ssrt = edma->membase + ((edma->version == v1) ?
-			EDMA_SSRT : EDMA64_SSRT);
-	edma->regs.cdne = edma->membase + ((edma->version == v1) ?
-			EDMA_CDNE : EDMA64_CDNE);
-	edma->regs.intl = edma->membase + ((edma->version == v1) ?
-			EDMA_INTR : EDMA64_INTL);
-	edma->regs.errl = edma->membase + ((edma->version == v1) ?
-			EDMA_ERR : EDMA64_ERRL);
+	edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_SERQ : EDMA_SERQ);
+	edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_CERQ : EDMA_CERQ);
+	edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_SEEI : EDMA_SEEI);
+	edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_CEEI : EDMA_CEEI);
+	edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_CINT : EDMA_CINT);
+	edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_CERR : EDMA_CERR);
+	edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_SSRT : EDMA_SSRT);
+	edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_CDNE : EDMA_CDNE);
+	edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_INTL : EDMA_INTR);
+	edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_ERRL : EDMA_ERR);
 
-	if (edma->version == v2) {
+	if (edma->drvdata->version == v2) {
 		edma->regs.erqh = edma->membase + EDMA64_ERQH;
 		edma->regs.eeih = edma->membase + EDMA64_EEIH;
 		edma->regs.errh = edma->membase + EDMA64_ERRH;
drivers/dma/fsl-edma-common.h
@@ -7,6 +7,7 @@
 #define _FSL_EDMA_COMMON_H_
 
 #include <linux/dma-direction.h>
+#include <linux/platform_device.h>
 #include "virt-dma.h"
 
 #define EDMA_CR_EDBG		BIT(1)
@@ -140,17 +141,24 @@ enum edma_version {
 	v2, /* 64ch Coldfire */
 };
 
+struct fsl_edma_drvdata {
+	enum edma_version	version;
+	u32			dmamuxs;
+	int			(*setup_irq)(struct platform_device *pdev,
+					     struct fsl_edma_engine *fsl_edma);
+};
+
 struct fsl_edma_engine {
 	struct dma_device	dma_dev;
 	void __iomem		*membase;
 	void __iomem		*muxbase[DMAMUX_NR];
 	struct clk		*muxclk[DMAMUX_NR];
 	struct mutex		fsl_edma_mutex;
+	const struct fsl_edma_drvdata *drvdata;
 	u32			n_chans;
 	int			txirq;
 	int			errirq;
 	bool			big_endian;
-	enum edma_version	version;
 	struct edma_regs	regs;
 	struct fsl_edma_chan	chans[];
 };
@ -92,7 +92,8 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
|
|||
struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
|
||||
struct dma_chan *chan, *_chan;
|
||||
struct fsl_edma_chan *fsl_chan;
|
||||
unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR;
|
||||
u32 dmamux_nr = fsl_edma->drvdata->dmamuxs;
|
||||
unsigned long chans_per_mux = fsl_edma->n_chans / dmamux_nr;
|
||||
|
||||
if (dma_spec->args_count != 2)
|
||||
return NULL;
|
||||
|
@@ -180,16 +181,38 @@ static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
 		clk_disable_unprepare(fsl_edma->muxclk[i]);
 }

+static struct fsl_edma_drvdata vf610_data = {
+	.version = v1,
+	.dmamuxs = DMAMUX_NR,
+	.setup_irq = fsl_edma_irq_init,
+};
+
+static const struct of_device_id fsl_edma_dt_ids[] = {
+	{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
+
 static int fsl_edma_probe(struct platform_device *pdev)
 {
+	const struct of_device_id *of_id =
+			of_match_device(fsl_edma_dt_ids, &pdev->dev);
 	struct device_node *np = pdev->dev.of_node;
 	struct fsl_edma_engine *fsl_edma;
+	const struct fsl_edma_drvdata *drvdata = NULL;
 	struct fsl_edma_chan *fsl_chan;
 	struct edma_regs *regs;
 	struct resource *res;
 	int len, chans;
 	int ret, i;

+	if (of_id)
+		drvdata = of_id->data;
+	if (!drvdata) {
+		dev_err(&pdev->dev, "unable to find driver data\n");
+		return -EINVAL;
+	}
+
 	ret = of_property_read_u32(np, "dma-channels", &chans);
 	if (ret) {
 		dev_err(&pdev->dev, "Can't get dma-channels.\n");
@@ -201,7 +224,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
 	if (!fsl_edma)
 		return -ENOMEM;

-	fsl_edma->version = v1;
+	fsl_edma->drvdata = drvdata;
 	fsl_edma->n_chans = chans;
 	mutex_init(&fsl_edma->fsl_edma_mutex);

@@ -213,7 +236,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
 	fsl_edma_setup_regs(fsl_edma);
 	regs = &fsl_edma->regs;

-	for (i = 0; i < DMAMUX_NR; i++) {
+	for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
 		char clkname[32];

 		res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
@@ -259,7 +282,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
 	}

 	edma_writel(fsl_edma, ~0, regs->intl);
-	ret = fsl_edma_irq_init(pdev, fsl_edma);
+	ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
 	if (ret)
 		return ret;

@@ -291,7 +314,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
 	if (ret) {
 		dev_err(&pdev->dev,
 			"Can't register Freescale eDMA engine. (%d)\n", ret);
-		fsl_disable_clocks(fsl_edma, DMAMUX_NR);
+		fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
 		return ret;
 	}

@@ -300,7 +323,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev,
 			"Can't register Freescale eDMA of_dma. (%d)\n", ret);
 		dma_async_device_unregister(&fsl_edma->dma_dev);
-		fsl_disable_clocks(fsl_edma, DMAMUX_NR);
+		fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
 		return ret;
 	}

@@ -319,7 +342,7 @@ static int fsl_edma_remove(struct platform_device *pdev)
 	fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
 	of_dma_controller_free(np);
 	dma_async_device_unregister(&fsl_edma->dma_dev);
-	fsl_disable_clocks(fsl_edma, DMAMUX_NR);
+	fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);

 	return 0;
 }
@@ -378,12 +401,6 @@ static const struct dev_pm_ops fsl_edma_pm_ops = {
 	.resume_early   = fsl_edma_resume_early,
 };

-static const struct of_device_id fsl_edma_dt_ids[] = {
-	{ .compatible = "fsl,vf610-edma", },
-	{ /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
-
 static struct platform_driver fsl_edma_driver = {
 	.driver		= {
 		.name	= "fsl-edma",
drivers/dma/fsl-qdma.c
@@ -113,6 +113,7 @@
 /* Field definition for Descriptor offset */
 #define QDMA_CCDF_STATUS		20
 #define QDMA_CCDF_OFFSET		20
+#define QDMA_SDDF_CMD(x)		(((u64)(x)) << 32)

 /* Field definition for safe loop count*/
 #define FSL_QDMA_HALT_COUNT		1500
@@ -341,6 +342,7 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
 static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
 				      dma_addr_t dst, dma_addr_t src, u32 len)
 {
+	u32 cmd;
 	struct fsl_qdma_format *sdf, *ddf;
 	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;

@@ -369,14 +371,14 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
 	/* This entry is the last entry. */
 	qdma_csgf_set_f(csgf_dest, len);
 	/* Descriptor Buffer */
-	sdf->data =
-		cpu_to_le64(FSL_QDMA_CMD_RWTTYPE <<
-			    FSL_QDMA_CMD_RWTTYPE_OFFSET);
-	ddf->data =
-		cpu_to_le64(FSL_QDMA_CMD_RWTTYPE <<
-			    FSL_QDMA_CMD_RWTTYPE_OFFSET);
-	ddf->data |=
-		cpu_to_le64(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
+	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
+			  FSL_QDMA_CMD_RWTTYPE_OFFSET);
+	sdf->data = QDMA_SDDF_CMD(cmd);
+
+	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
+			  FSL_QDMA_CMD_RWTTYPE_OFFSET);
+	cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
+	ddf->data = QDMA_SDDF_CMD(cmd);
 }

 /*
drivers/dma/hsu/hsu.c
@@ -61,10 +61,10 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)

 	if (hsuc->direction == DMA_MEM_TO_DEV) {
 		bsr = config->dst_maxburst;
-		mtsr = config->src_addr_width;
+		mtsr = config->dst_addr_width;
 	} else if (hsuc->direction == DMA_DEV_TO_MEM) {
 		bsr = config->src_maxburst;
-		mtsr = config->dst_addr_width;
+		mtsr = config->src_addr_width;
 	}

 	hsu_chan_disable(hsuc);
drivers/dma/imx-sdma.c
@@ -1934,16 +1934,11 @@ static int sdma_init(struct sdma_engine *sdma)
 static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
-	struct sdma_engine *sdma = sdmac->sdma;
 	struct imx_dma_data *data = fn_param;

 	if (!imx_dma_is_general_purpose(chan))
 		return false;

-	/* return false if it's not the right device */
-	if (sdma->dev->of_node != data->of_node)
-		return false;
-
 	sdmac->data = *data;
 	chan->private = &sdmac->data;

@@ -1971,9 +1966,9 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
 	 * be set to sdmac->event_id1.
 	 */
 	data.dma_request2 = 0;
-	data.of_node = ofdma->of_node;

-	return dma_request_channel(mask, sdma_filter_fn, &data);
+	return __dma_request_channel(&mask, sdma_filter_fn, &data,
+				     ofdma->of_node);
 }

 static int sdma_probe(struct platform_device *pdev)
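All of the filter-function conversions in this series (imx-sdma above, and mmp_tdma, mxs-dma, of-dma, rcar-dmac, usb-dmac below) have the same shape: drop the open-coded of_node comparison from the filter and hand the node to __dma_request_channel(), which performs the device-node check in the dmaengine core. A condensed sketch of the converted xlate, assuming the 5.3-era __dma_request_channel() signature (my_xlate and my_filter_fn are illustrative names):

	static struct dma_chan *my_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/*
		 * The core now rejects channels whose device node differs from
		 * ofdma->of_node, so the filter no longer needs its own check.
		 */
		return __dma_request_channel(&mask, my_filter_fn, dma_spec,
					     ofdma->of_node);
	}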
drivers/dma/mcf-edma.c
@@ -164,6 +164,11 @@ static void mcf_edma_irq_free(struct platform_device *pdev,
 		free_irq(irq, mcf_edma);
 }

+static struct fsl_edma_drvdata mcf_data = {
+	.version = v2,
+	.setup_irq = mcf_edma_irq_init,
+};
+
 static int mcf_edma_probe(struct platform_device *pdev)
 {
 	struct mcf_edma_platform_data *pdata;
@@ -187,8 +192,8 @@ static int mcf_edma_probe(struct platform_device *pdev)

 	mcf_edma->n_chans = chans;

-	/* Set up version for ColdFire edma */
-	mcf_edma->version = v2;
+	/* Set up drvdata for ColdFire edma */
+	mcf_edma->drvdata = &mcf_data;
 	mcf_edma->big_endian = 1;

 	if (!mcf_edma->n_chans) {
@@ -223,7 +228,7 @@ static int mcf_edma_probe(struct platform_device *pdev)
 	iowrite32(~0, regs->inth);
 	iowrite32(~0, regs->intl);

-	ret = mcf_edma_irq_init(pdev, mcf_edma);
+	ret = mcf_edma->drvdata->setup_irq(pdev, mcf_edma);
 	if (ret)
 		return ret;
drivers/dma/mediatek/Kconfig
@@ -25,3 +25,14 @@ config MTK_CQDMA

 	  This controller provides the channels which is dedicated to
 	  memory-to-memory transfer to offload from CPU.
+
+config MTK_UART_APDMA
+	tristate "MediaTek SoCs APDMA support for UART"
+	depends on OF && SERIAL_8250_MT6577
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support for the UART DMA engine found on MediaTek MTK SoCs.
+	  When SERIAL_8250_MT6577 is enabled, and if you want to use DMA,
+	  you can enable the config. The DMA engine can only be used
+	  with MediaTek SoCs.
drivers/dma/mediatek/Makefile
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MTK_UART_APDMA) += mtk-uart-apdma.o
 obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
 obj-$(CONFIG_MTK_CQDMA) += mtk-cqdma.o
drivers/dma/mediatek/mtk-uart-apdma.c (new file, 666 lines)
@@ -0,0 +1,666 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek UART APDMA driver.
 *
 * Copyright (c) 2019 MediaTek Inc.
 * Author: Long Cheng <long.cheng@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../virt-dma.h"

/* The default number of virtual channel */
#define MTK_UART_APDMA_NR_VCHANS	8

#define VFF_EN_B		BIT(0)
#define VFF_STOP_B		BIT(0)
#define VFF_FLUSH_B		BIT(0)
#define VFF_4G_EN_B		BIT(0)
/* rx valid size >= vff thre */
#define VFF_RX_INT_EN_B		(BIT(0) | BIT(1))
/* tx left size >= vff thre */
#define VFF_TX_INT_EN_B		BIT(0)
#define VFF_WARM_RST_B		BIT(0)
#define VFF_RX_INT_CLR_B	(BIT(0) | BIT(1))
#define VFF_TX_INT_CLR_B	0
#define VFF_STOP_CLR_B		0
#define VFF_EN_CLR_B		0
#define VFF_INT_EN_CLR_B	0
#define VFF_4G_SUPPORT_CLR_B	0

/*
 * interrupt trigger level for tx
 * if threshold is n, no polling is required to start tx.
 * otherwise need polling VFF_FLUSH.
 */
#define VFF_TX_THRE(n)		(n)
/* interrupt trigger level for rx */
#define VFF_RX_THRE(n)		((n) * 3 / 4)

#define VFF_RING_SIZE	0xffff
/* invert this bit when wrap ring head again */
#define VFF_RING_WRAP	0x10000

#define VFF_INT_FLAG		0x00
#define VFF_INT_EN		0x04
#define VFF_EN			0x08
#define VFF_RST			0x0c
#define VFF_STOP		0x10
#define VFF_FLUSH		0x14
#define VFF_ADDR		0x1c
#define VFF_LEN			0x24
#define VFF_THRE		0x28
#define VFF_WPT			0x2c
#define VFF_RPT			0x30
/* TX: the buffer size HW can read. RX: the buffer size SW can read. */
#define VFF_VALID_SIZE		0x3c
/* TX: the buffer size SW can write. RX: the buffer size HW can write. */
#define VFF_LEFT_SIZE		0x40
#define VFF_DEBUG_STATUS	0x50
#define VFF_4G_SUPPORT		0x54

struct mtk_uart_apdmadev {
	struct dma_device ddev;
	struct clk *clk;
	bool support_33bits;
	unsigned int dma_requests;
};

struct mtk_uart_apdma_desc {
	struct virt_dma_desc vd;

	dma_addr_t addr;
	unsigned int avail_len;
};

struct mtk_chan {
	struct virt_dma_chan vc;
	struct dma_slave_config	cfg;
	struct mtk_uart_apdma_desc *desc;
	enum dma_transfer_direction dir;

	void __iomem *base;
	unsigned int irq;

	unsigned int rx_status;
};

static inline struct mtk_uart_apdmadev *
to_mtk_uart_apdma_dev(struct dma_device *d)
{
	return container_of(d, struct mtk_uart_apdmadev, ddev);
}

static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c)
{
	return container_of(c, struct mtk_chan, vc.chan);
}

static inline struct mtk_uart_apdma_desc *to_mtk_uart_apdma_desc
	(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct mtk_uart_apdma_desc, vd.tx);
}

static void mtk_uart_apdma_write(struct mtk_chan *c,
				 unsigned int reg, unsigned int val)
{
	writel(val, c->base + reg);
}

static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
{
	return readl(c->base + reg);
}

static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
{
	struct dma_chan *chan = vd->tx.chan;
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);

	kfree(c->desc);
}

static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
{
	struct mtk_uart_apdmadev *mtkd =
				to_mtk_uart_apdma_dev(c->vc.chan.device);
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int wpt, vff_sz;

	vff_sz = c->cfg.dst_port_window_size;
	if (!mtk_uart_apdma_read(c, VFF_LEN)) {
		mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
		mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
		mtk_uart_apdma_write(c, VFF_THRE, VFF_TX_THRE(vff_sz));
		mtk_uart_apdma_write(c, VFF_WPT, 0);
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);

		if (mtkd->support_33bits)
			mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
	}

	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
	if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
		dev_err(c->vc.chan.device->dev, "Enable TX fail\n");

	if (!mtk_uart_apdma_read(c, VFF_LEFT_SIZE)) {
		mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
		return;
	}

	wpt = mtk_uart_apdma_read(c, VFF_WPT);

	wpt += c->desc->avail_len;
	if ((wpt & VFF_RING_SIZE) == vff_sz)
		wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP;

	/* Let DMA start moving data */
	mtk_uart_apdma_write(c, VFF_WPT, wpt);

	/* HW auto set to 0 when left size >= threshold */
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
	if (!mtk_uart_apdma_read(c, VFF_FLUSH))
		mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);
}

static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
{
	struct mtk_uart_apdmadev *mtkd =
				to_mtk_uart_apdma_dev(c->vc.chan.device);
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int vff_sz;

	vff_sz = c->cfg.src_port_window_size;
	if (!mtk_uart_apdma_read(c, VFF_LEN)) {
		mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
		mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
		mtk_uart_apdma_write(c, VFF_THRE, VFF_RX_THRE(vff_sz));
		mtk_uart_apdma_write(c, VFF_RPT, 0);
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);

		if (mtkd->support_33bits)
			mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
	}

	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_RX_INT_EN_B);
	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
	if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
		dev_err(c->vc.chan.device->dev, "Enable RX fail\n");
}

static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
{
	struct mtk_uart_apdma_desc *d = c->desc;

	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);

	list_del(&d->vd.node);
	vchan_cookie_complete(&d->vd);
}

static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
{
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int len, wg, rg;
	int cnt;

	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);

	if (!mtk_uart_apdma_read(c, VFF_VALID_SIZE))
		return;

	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

	len = c->cfg.src_port_window_size;
	rg = mtk_uart_apdma_read(c, VFF_RPT);
	wg = mtk_uart_apdma_read(c, VFF_WPT);
	cnt = (wg & VFF_RING_SIZE) - (rg & VFF_RING_SIZE);

	/*
	 * The buffer is ring buffer. If wrap bit different,
	 * represents the start of the next cycle for WPT
	 */
	if ((rg ^ wg) & VFF_RING_WRAP)
		cnt += len;

	c->rx_status = d->avail_len - cnt;
	mtk_uart_apdma_write(c, VFF_RPT, wg);

	list_del(&d->vd.node);
	vchan_cookie_complete(&d->vd);
}

static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
{
	struct dma_chan *chan = (struct dma_chan *)dev_id;
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->dir == DMA_DEV_TO_MEM)
		mtk_uart_apdma_rx_handler(c);
	else if (c->dir == DMA_MEM_TO_DEV)
		mtk_uart_apdma_tx_handler(c);
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return IRQ_HANDLED;
}

static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned int status;
	int ret;

	ret = pm_runtime_get_sync(mtkd->ddev.dev);
	if (ret < 0) {
		pm_runtime_put_noidle(chan->device->dev);
		return ret;
	}

	mtk_uart_apdma_write(c, VFF_ADDR, 0);
	mtk_uart_apdma_write(c, VFF_THRE, 0);
	mtk_uart_apdma_write(c, VFF_LEN, 0);
	mtk_uart_apdma_write(c, VFF_RST, VFF_WARM_RST_B);

	ret = readx_poll_timeout(readl, c->base + VFF_EN,
				 status, !status, 10, 100);
	if (ret)
		return ret;

	ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
			  IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
	if (ret < 0) {
		dev_err(chan->device->dev, "Can't request dma IRQ\n");
		return -EINVAL;
	}

	if (mtkd->support_33bits)
		mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);

	return ret;
}

static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan)
{
	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);

	free_irq(c->irq, chan);

	tasklet_kill(&c->vc.task);

	vchan_free_chan_resources(&c->vc);

	pm_runtime_put_sync(mtkd->ddev.dev);
}

static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan,
						dma_cookie_t cookie,
						struct dma_tx_state *txstate)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (!txstate)
		return ret;

	dma_set_residue(txstate, c->rx_status);

	return ret;
}

/*
 * dmaengine_prep_slave_single will call the function. and sglen is 1.
 * 8250 uart using one ring buffer, and deal with one sg.
 */
static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
	(struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sglen, enum dma_transfer_direction dir,
	unsigned long tx_flags, void *context)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	struct mtk_uart_apdma_desc *d;

	if (!is_slave_direction(dir) || sglen != 1)
		return NULL;

	/* Now allocate and setup the descriptor */
	d = kzalloc(sizeof(*d), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->avail_len = sg_dma_len(sgl);
	d->addr = sg_dma_address(sgl);
	c->dir = dir;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc)) {
		vd = vchan_next_desc(&c->vc);
		c->desc = to_mtk_uart_apdma_desc(&vd->tx);

		if (c->dir == DMA_DEV_TO_MEM)
			mtk_uart_apdma_start_rx(c);
		else if (c->dir == DMA_MEM_TO_DEV)
			mtk_uart_apdma_start_tx(c);
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static int mtk_uart_apdma_slave_config(struct dma_chan *chan,
				       struct dma_slave_config *config)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);

	memcpy(&c->cfg, config, sizeof(*config));

	return 0;
}

static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned long flags;
	unsigned int status;
	LIST_HEAD(head);
	int ret;

	mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);

	ret = readx_poll_timeout(readl, c->base + VFF_FLUSH,
				 status, status != VFF_FLUSH_B, 10, 100);
	if (ret)
		dev_err(c->vc.chan.device->dev, "flush: fail, status=0x%x\n",
			mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));

	/*
	 * Stop need 3 steps.
	 * 1. set stop to 1
	 * 2. wait en to 0
	 * 3. set stop as 0
	 */
	mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_B);
	ret = readx_poll_timeout(readl, c->base + VFF_EN,
				 status, !status, 10, 100);
	if (ret)
		dev_err(c->vc.chan.device->dev, "stop: fail, status=0x%x\n",
			mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));

	mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

	if (c->dir == DMA_DEV_TO_MEM)
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);
	else if (c->dir == DMA_MEM_TO_DEV)
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);

	synchronize_irq(c->irq);

	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	vchan_dma_desc_free_list(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);

	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

	synchronize_irq(c->irq);

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd)
{
	while (!list_empty(&mtkd->ddev.channels)) {
		struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels,
			struct mtk_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}

static const struct of_device_id mtk_uart_apdma_match[] = {
	{ .compatible = "mediatek,mt6577-uart-dma", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match);

static int mtk_uart_apdma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct mtk_uart_apdmadev *mtkd;
	int bit_mask = 32, rc;
	struct resource *res;
	struct mtk_chan *c;
	unsigned int i;

	mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL);
	if (!mtkd)
		return -ENOMEM;

	mtkd->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(mtkd->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		rc = PTR_ERR(mtkd->clk);
		return rc;
	}

	if (of_property_read_bool(np, "mediatek,dma-33bits"))
		mtkd->support_33bits = true;

	if (mtkd->support_33bits)
		bit_mask = 33;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bit_mask));
	if (rc)
		return rc;

	dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask);
	mtkd->ddev.device_alloc_chan_resources =
				mtk_uart_apdma_alloc_chan_resources;
	mtkd->ddev.device_free_chan_resources =
				mtk_uart_apdma_free_chan_resources;
	mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status;
	mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending;
	mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg;
	mtkd->ddev.device_config = mtk_uart_apdma_slave_config;
	mtkd->ddev.device_pause = mtk_uart_apdma_device_pause;
	mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all;
	mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
	mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
	mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	mtkd->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&mtkd->ddev.channels);

	mtkd->dma_requests = MTK_UART_APDMA_NR_VCHANS;
	if (of_property_read_u32(np, "dma-requests", &mtkd->dma_requests)) {
		dev_info(&pdev->dev,
			 "Using %u as missing dma-requests property\n",
			 MTK_UART_APDMA_NR_VCHANS);
	}

	for (i = 0; i < mtkd->dma_requests; i++) {
		c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL);
		if (!c) {
			rc = -ENODEV;
			goto err_no_dma;
		}

		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res) {
			rc = -ENODEV;
			goto err_no_dma;
		}

		c->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(c->base)) {
			rc = PTR_ERR(c->base);
			goto err_no_dma;
		}
		c->vc.desc_free = mtk_uart_apdma_desc_free;
		vchan_init(&c->vc, &mtkd->ddev);

		rc = platform_get_irq(pdev, i);
		if (rc < 0) {
			dev_err(&pdev->dev, "failed to get IRQ[%d]\n", i);
			goto err_no_dma;
		}
		c->irq = rc;
	}

	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);

	rc = dma_async_device_register(&mtkd->ddev);
	if (rc)
		goto rpm_disable;

	platform_set_drvdata(pdev, mtkd);

	/* Device-tree DMA controller registration */
	rc = of_dma_controller_register(np, of_dma_xlate_by_chan_id, mtkd);
	if (rc)
		goto dma_remove;

	return rc;

dma_remove:
	dma_async_device_unregister(&mtkd->ddev);
rpm_disable:
	pm_runtime_disable(&pdev->dev);
err_no_dma:
	mtk_uart_apdma_free(mtkd);
	return rc;
}

static int mtk_uart_apdma_remove(struct platform_device *pdev)
{
	struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);

	mtk_uart_apdma_free(mtkd);

	dma_async_device_unregister(&mtkd->ddev);

	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_uart_apdma_suspend(struct device *dev)
{
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(mtkd->clk);

	return 0;
}

static int mtk_uart_apdma_resume(struct device *dev)
{
	int ret;
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mtkd->clk);
		if (ret)
			return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mtk_uart_apdma_runtime_suspend(struct device *dev)
{
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	clk_disable_unprepare(mtkd->clk);

	return 0;
}

static int mtk_uart_apdma_runtime_resume(struct device *dev)
{
	int ret;
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	ret = clk_prepare_enable(mtkd->clk);
	if (ret)
		return ret;

	return 0;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops mtk_uart_apdma_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume)
	SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend,
			   mtk_uart_apdma_runtime_resume, NULL)
};

static struct platform_driver mtk_uart_apdma_driver = {
	.probe	= mtk_uart_apdma_probe,
	.remove	= mtk_uart_apdma_remove,
	.driver = {
		.name		= KBUILD_MODNAME,
		.pm		= &mtk_uart_apdma_pm_ops,
		.of_match_table = of_match_ptr(mtk_uart_apdma_match),
	},
};

module_platform_driver(mtk_uart_apdma_driver);

MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver");
MODULE_AUTHOR("Long Cheng <long.cheng@mediatek.com>");
MODULE_LICENSE("GPL v2");
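The TX path in the new driver advances the VFF write pointer modulo the ring size, flipping the VFF_RING_WRAP bit each time the offset wraps so that hardware can distinguish a full ring from an empty one. A standalone sketch of that arithmetic, with the constants copied from the driver and a hypothetical helper name (vff_advance):

	#define VFF_RING_SIZE	0xffff
	#define VFF_RING_WRAP	0x10000

	/* Advance a VFF ring pointer by len bytes in a ring of vff_sz bytes. */
	static unsigned int vff_advance(unsigned int wpt, unsigned int len,
					unsigned int vff_sz)
	{
		wpt += len;
		/* Offset reached the ring end: clear it and toggle the wrap bit. */
		if ((wpt & VFF_RING_SIZE) == vff_sz)
			wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP;
		return wpt;
	}

The RX handler uses the same encoding in reverse: when the wrap bits of the read and write pointers differ, the write pointer has lapped the read pointer once, so the valid byte count is the raw offset difference plus the ring size.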
drivers/dma/mic_x100_dma.c
@@ -717,10 +717,8 @@ static int mic_dma_driver_probe(struct mbus_device *mbdev)
 	if (mic_dma_dbg) {
 		mic_dma_dev->dbg_dir = debugfs_create_dir(dev_name(&mbdev->dev),
 							  mic_dma_dbg);
-		if (mic_dma_dev->dbg_dir)
-			debugfs_create_file("mic_dma_reg", 0444,
-					    mic_dma_dev->dbg_dir, mic_dma_dev,
-					    &mic_dma_reg_fops);
+		debugfs_create_file("mic_dma_reg", 0444, mic_dma_dev->dbg_dir,
+				    mic_dma_dev, &mic_dma_reg_fops);
 	}
 	return 0;
 }
drivers/dma/mmp_tdma.c
@@ -582,18 +582,12 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
 }

 struct mmp_tdma_filter_param {
-	struct device_node *of_node;
 	unsigned int chan_id;
 };

 static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param)
 {
 	struct mmp_tdma_filter_param *param = fn_param;
-	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
-	struct dma_device *pdma_device = tdmac->chan.device;
-
-	if (pdma_device->dev->of_node != param->of_node)
-		return false;

 	if (chan->chan_id != param->chan_id)
 		return false;
@@ -611,13 +605,13 @@ static struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
 	if (dma_spec->args_count != 1)
 		return NULL;

-	param.of_node = ofdma->of_node;
 	param.chan_id = dma_spec->args[0];

 	if (param.chan_id >= TDMA_CHANNEL_NUM)
 		return NULL;

-	return dma_request_channel(mask, mmp_tdma_filter_fn, &param);
+	return __dma_request_channel(&mask, mmp_tdma_filter_fn, &param,
+				     ofdma->of_node);
 }

 static const struct of_device_id mmp_tdma_dt_ids[] = {
drivers/dma/mxs-dma.c
@@ -719,7 +719,6 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
 }

 struct mxs_dma_filter_param {
-	struct device_node *of_node;
 	unsigned int chan_id;
 };

@@ -730,9 +729,6 @@ static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param)
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
 	int chan_irq;

-	if (mxs_dma->dma_device.dev->of_node != param->of_node)
-		return false;
-
 	if (chan->chan_id != param->chan_id)
 		return false;

@@ -755,13 +751,13 @@ static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
 	if (dma_spec->args_count != 1)
 		return NULL;

-	param.of_node = ofdma->of_node;
 	param.chan_id = dma_spec->args[0];

 	if (param.chan_id >= mxs_dma->nr_channels)
 		return NULL;

-	return dma_request_channel(mask, mxs_dma_filter_fn, &param);
+	return __dma_request_channel(&mask, mxs_dma_filter_fn, &param,
+				     ofdma->of_node);
 }

 static int __init mxs_dma_probe(struct platform_device *pdev)
drivers/dma/of-dma.c
@@ -313,8 +313,8 @@ struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
 	if (count != 1)
 		return NULL;

-	return dma_request_channel(info->dma_cap, info->filter_fn,
-				   &dma_spec->args[0]);
+	return __dma_request_channel(&info->dma_cap, info->filter_fn,
+				     &dma_spec->args[0], dma_spec->np);
 }
 EXPORT_SYMBOL_GPL(of_dma_simple_xlate);
drivers/dma/pl330.c
@@ -25,6 +25,7 @@
 #include <linux/err.h>
 #include <linux/pm_runtime.h>
 #include <linux/bug.h>
+#include <linux/reset.h>

 #include "dmaengine.h"
 #define PL330_MAX_CHAN		8
@@ -496,6 +497,9 @@ struct pl330_dmac {
 	unsigned int num_peripherals;
 	struct dma_pl330_chan *peripherals; /* keep at end */
 	int quirks;
+
+	struct reset_control	*rstc;
+	struct reset_control	*rstc_ocp;
 };

 static struct pl330_of_quirks {
@@ -3024,6 +3028,32 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)

 	amba_set_drvdata(adev, pl330);

+	pl330->rstc = devm_reset_control_get_optional(&adev->dev, "dma");
+	if (IS_ERR(pl330->rstc)) {
+		if (PTR_ERR(pl330->rstc) != -EPROBE_DEFER)
+			dev_err(&adev->dev, "Failed to get reset!\n");
+		return PTR_ERR(pl330->rstc);
+	} else {
+		ret = reset_control_deassert(pl330->rstc);
+		if (ret) {
+			dev_err(&adev->dev, "Couldn't deassert the device from reset!\n");
+			return ret;
+		}
+	}
+
+	pl330->rstc_ocp = devm_reset_control_get_optional(&adev->dev, "dma-ocp");
+	if (IS_ERR(pl330->rstc_ocp)) {
+		if (PTR_ERR(pl330->rstc_ocp) != -EPROBE_DEFER)
+			dev_err(&adev->dev, "Failed to get OCP reset!\n");
+		return PTR_ERR(pl330->rstc_ocp);
+	} else {
+		ret = reset_control_deassert(pl330->rstc_ocp);
+		if (ret) {
+			dev_err(&adev->dev, "Couldn't deassert the device from OCP reset!\n");
+			return ret;
+		}
+	}
+
 	for (i = 0; i < AMBA_NR_IRQS; i++) {
 		irq = adev->irq[i];
 		if (irq) {
@@ -3164,6 +3194,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 probe_err2:
 	pl330_del(pl330);

+	if (pl330->rstc_ocp)
+		reset_control_assert(pl330->rstc_ocp);
+
+	if (pl330->rstc)
+		reset_control_assert(pl330->rstc);
 	return ret;
 }

@@ -3202,6 +3237,11 @@ static int pl330_remove(struct amba_device *adev)

 	pl330_del(pl330);

+	if (pl330->rstc_ocp)
+		reset_control_assert(pl330->rstc_ocp);
+
+	if (pl330->rstc)
+		reset_control_assert(pl330->rstc);
 	return 0;
 }
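The pl330 change relies on a property of the reset API worth noting: devm_reset_control_get_optional() returns NULL, not an error, when the device tree has no matching "resets" entry, and the reset_control_* helpers treat a NULL handle as a no-op. That is why the deassert branch above can run unconditionally on the success path. A condensed sketch of the pattern (get_and_deassert_reset is an illustrative name):

	#include <linux/reset.h>

	static int get_and_deassert_reset(struct device *dev)
	{
		/*
		 * NULL is returned when the named reset is absent; the
		 * reset_control_* helpers accept NULL as a no-op, so only
		 * real errors (e.g. -EPROBE_DEFER) need handling.
		 */
		struct reset_control *rstc =
			devm_reset_control_get_optional(dev, "dma");

		if (IS_ERR(rstc))
			return PTR_ERR(rstc);
		return reset_control_deassert(rstc);
	}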
drivers/dma/pxa_dma.c
@@ -129,7 +129,6 @@ struct pxad_device {
 	spinlock_t		phy_lock; /* Phy association */
 #ifdef CONFIG_DEBUG_FS
 	struct dentry		*dbgfs_root;
-	struct dentry		*dbgfs_state;
 	struct dentry		**dbgfs_chan;
 #endif
 };
@@ -323,31 +322,18 @@ static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev,
 					     int ch, struct dentry *chandir)
 {
 	char chan_name[11];
-	struct dentry *chan, *chan_state = NULL, *chan_descr = NULL;
-	struct dentry *chan_reqs = NULL;
+	struct dentry *chan;
 	void *dt;

 	scnprintf(chan_name, sizeof(chan_name), "%d", ch);
 	chan = debugfs_create_dir(chan_name, chandir);
 	dt = (void *)&pdev->phys[ch];

-	if (chan)
-		chan_state = debugfs_create_file("state", 0400, chan, dt,
-						 &chan_state_fops);
-	if (chan_state)
-		chan_descr = debugfs_create_file("descriptors", 0400, chan, dt,
-						 &descriptors_fops);
-	if (chan_descr)
-		chan_reqs = debugfs_create_file("requesters", 0400, chan, dt,
-						&requester_chan_fops);
-	if (!chan_reqs)
-		goto err_state;
+	debugfs_create_file("state", 0400, chan, dt, &chan_state_fops);
+	debugfs_create_file("descriptors", 0400, chan, dt, &descriptors_fops);
+	debugfs_create_file("requesters", 0400, chan, dt, &requester_chan_fops);

 	return chan;
-
-err_state:
-	debugfs_remove_recursive(chan);
-	return NULL;
 }

 static void pxad_init_debugfs(struct pxad_device *pdev)
@@ -355,40 +341,20 @@ static void pxad_init_debugfs(struct pxad_device *pdev)
 	int i;
 	struct dentry *chandir;

-	pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL);
-	if (IS_ERR(pdev->dbgfs_root) || !pdev->dbgfs_root)
-		goto err_root;
-
-	pdev->dbgfs_state = debugfs_create_file("state", 0400, pdev->dbgfs_root,
-						pdev, &state_fops);
-	if (!pdev->dbgfs_state)
-		goto err_state;
-
 	pdev->dbgfs_chan =
-		kmalloc_array(pdev->nr_chans, sizeof(*pdev->dbgfs_state),
+		kmalloc_array(pdev->nr_chans, sizeof(struct dentry *),
 			      GFP_KERNEL);
 	if (!pdev->dbgfs_chan)
-		goto err_alloc;
+		return;
+
+	pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL);
+
+	debugfs_create_file("state", 0400, pdev->dbgfs_root, pdev, &state_fops);

 	chandir = debugfs_create_dir("channels", pdev->dbgfs_root);
-	if (!chandir)
-		goto err_chandir;

-	for (i = 0; i < pdev->nr_chans; i++) {
+	for (i = 0; i < pdev->nr_chans; i++)
 		pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir);
-		if (!pdev->dbgfs_chan[i])
-			goto err_chans;
-	}
-
-	return;
-err_chans:
-err_chandir:
-	kfree(pdev->dbgfs_chan);
-err_alloc:
-err_state:
-	debugfs_remove_recursive(pdev->dbgfs_root);
-err_root:
-	pr_err("pxad: debugfs is not available\n");
 }

 static void pxad_cleanup_debugfs(struct pxad_device *pdev)
drivers/dma/qcom/hidma.h
@@ -93,8 +93,6 @@ struct hidma_chan {
 	 * It is used by the DMA complete notification to
 	 * locate the descriptor that initiated the transfer.
 	 */
-	struct dentry			*debugfs;
-	struct dentry			*stats;
 	struct hidma_dev		*dmadev;
 	struct hidma_desc		*running;

@@ -126,7 +124,6 @@ struct hidma_dev {
 	struct dma_device		ddev;

 	struct dentry			*debugfs;
-	struct dentry			*stats;

 	/* sysfs entry for the channel id */
 	struct device_attribute		*chid_attrs;

@@ -158,6 +155,6 @@ irqreturn_t hidma_ll_inthandler(int irq, void *arg);
 irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause);
 void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
 			       u8 err_code);
-int hidma_debug_init(struct hidma_dev *dmadev);
+void hidma_debug_init(struct hidma_dev *dmadev);
 void hidma_debug_uninit(struct hidma_dev *dmadev);
 #endif

drivers/dma/qcom/hidma_dbg.c
@@ -138,17 +138,13 @@ void hidma_debug_uninit(struct hidma_dev *dmadev)
 	debugfs_remove_recursive(dmadev->debugfs);
 }

-int hidma_debug_init(struct hidma_dev *dmadev)
+void hidma_debug_init(struct hidma_dev *dmadev)
 {
-	int rc = 0;
 	int chidx = 0;
 	struct list_head *position = NULL;
+	struct dentry *dir;

 	dmadev->debugfs = debugfs_create_dir(dev_name(dmadev->ddev.dev), NULL);
-	if (!dmadev->debugfs) {
-		rc = -ENODEV;
-		return rc;
-	}

 	/* walk through the virtual channel list */
 	list_for_each(position, &dmadev->ddev.channels) {
@@ -157,32 +153,13 @@ void hidma_debug_init(struct hidma_dev *dmadev)
 		chan = list_entry(position, struct hidma_chan,
 				  chan.device_node);
 		sprintf(chan->dbg_name, "chan%d", chidx);
-		chan->debugfs = debugfs_create_dir(chan->dbg_name,
+		dir = debugfs_create_dir(chan->dbg_name,
 				   dmadev->debugfs);
-		if (!chan->debugfs) {
-			rc = -ENOMEM;
-			goto cleanup;
-		}
-		chan->stats = debugfs_create_file("stats", S_IRUGO,
-						  chan->debugfs, chan,
-						  &hidma_chan_fops);
-		if (!chan->stats) {
-			rc = -ENOMEM;
-			goto cleanup;
-		}
+		debugfs_create_file("stats", S_IRUGO, dir, chan,
+				    &hidma_chan_fops);
 		chidx++;
 	}

-	dmadev->stats = debugfs_create_file("stats", S_IRUGO,
-					    dmadev->debugfs, dmadev,
-					    &hidma_dma_fops);
-	if (!dmadev->stats) {
-		rc = -ENOMEM;
-		goto cleanup;
-	}
-
-	return 0;
-cleanup:
-	hidma_debug_uninit(dmadev);
-	return rc;
+	debugfs_create_file("stats", S_IRUGO, dmadev->debugfs, dmadev,
+			    &hidma_dma_fops);
 }
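The hidma, pxa and mic conversions all depend on the same debugfs design decision: creation failures are deliberately non-fatal, so the debugfs_create_* helpers may be called without checking their results, and an error-valued parent dentry is tolerated by subsequent calls. A minimal sketch of the resulting idiom, with hypothetical names (my_dev, my_fops):

	#include <linux/debugfs.h>

	static void my_debug_init(struct my_dev *mydev)
	{
		struct dentry *dir = debugfs_create_dir("mydev", NULL);

		/*
		 * Even if dir is an error pointer, debugfs_create_file()
		 * degrades gracefully, so no error handling is needed and
		 * the init function can return void.
		 */
		debugfs_create_file("stats", 0444, dir, mydev, &my_fops);
	}

This is what lets hidma_debug_init() drop its int return type and every goto-based unwind path.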
drivers/dma/sh/Kconfig
@@ -47,9 +47,3 @@ config RENESAS_USB_DMAC
 	help
 	  This driver supports the USB-DMA controller found in the Renesas
 	  SoCs.
-
-config SUDMAC
-	tristate "Renesas SUDMAC support"
-	depends on SH_DMAE_BASE
-	help
-	  Enable support for the Renesas SUDMAC controllers.

drivers/dma/sh/Makefile
@@ -15,4 +15,3 @@ obj-$(CONFIG_SH_DMAE) += shdma.o

 obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
 obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o
-obj-$(CONFIG_SUDMAC) += sudmac.o
drivers/dma/sh/rcar-dmac.c
@@ -1165,7 +1165,7 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

 	/* Someone calling slave DMA on a generic channel? */
-	if (rchan->mid_rid < 0 || !sg_len) {
+	if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) {
 		dev_warn(chan->device->dev,
 			 "%s: bad parameter: len=%d, id=%d\n",
 			 __func__, sg_len, rchan->mid_rid);
@@ -1654,8 +1654,7 @@ static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
 	 * Forcing it to call dma_request_channel() and iterate through all
 	 * channels from all controllers is just pointless.
 	 */
-	if (chan->device->device_config != rcar_dmac_device_config ||
-	    dma_spec->np != chan->device->dev->of_node)
+	if (chan->device->device_config != rcar_dmac_device_config)
 		return false;

 	return !test_and_set_bit(dma_spec->args[0], dmac->modules);
@@ -1675,7 +1674,8 @@ static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);

-	chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec);
+	chan = __dma_request_channel(&mask, rcar_dmac_chan_filter, dma_spec,
+				     ofdma->of_node);
 	if (!chan)
 		return NULL;
drivers/dma/sh/sudmac.c (deleted file, 414 lines)
@@ -1,414 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas SUDMAC support
 *
 * Copyright (C) 2013 Renesas Solutions Corp.
 *
 * based on drivers/dma/sh/shdma.c:
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 */

#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sudmac.h>

struct sudmac_chan {
	struct shdma_chan shdma_chan;
	void __iomem *base;
	char dev_id[16];	/* unique name per DMAC of channel */

	u32 offset;		/* for CFG, BA, BBC, CA, CBC, DEN */
	u32 cfg;
	u32 dint_end_bit;
};

struct sudmac_device {
	struct shdma_dev shdma_dev;
	struct sudmac_pdata *pdata;
	void __iomem *chan_reg;
};

struct sudmac_regs {
	u32 base_addr;
	u32 base_byte_count;
};

struct sudmac_desc {
	struct sudmac_regs hw;
	struct shdma_desc shdma_desc;
};

#define to_chan(schan) container_of(schan, struct sudmac_chan, shdma_chan)
#define to_desc(sdesc) container_of(sdesc, struct sudmac_desc, shdma_desc)
#define to_sdev(sc) container_of(sc->shdma_chan.dma_chan.device, \
				 struct sudmac_device, shdma_dev.dma_dev)

/* SUDMAC register */
#define SUDMAC_CH0CFG		0x00
#define SUDMAC_CH0BA		0x10
#define SUDMAC_CH0BBC		0x18
#define SUDMAC_CH0CA		0x20
#define SUDMAC_CH0CBC		0x28
#define SUDMAC_CH0DEN		0x30
#define SUDMAC_DSTSCLR		0x38
#define SUDMAC_DBUFCTRL		0x3C
#define SUDMAC_DINTCTRL		0x40
#define SUDMAC_DINTSTS		0x44
#define SUDMAC_DINTSTSCLR	0x48
#define SUDMAC_CH0SHCTRL	0x50

/* Definitions for the sudmac_channel.config */
#define SUDMAC_SENDBUFM	0x1000 /* b12: Transmit Buffer Mode */
#define SUDMAC_RCVENDM	0x0100 /* b8: Receive Data Transfer End Mode */
#define SUDMAC_LBA_WAIT	0x0030 /* b5-4: Local Bus Access Wait */

/* Definitions for the sudmac_channel.dint_end_bit */
#define SUDMAC_CH1ENDE	0x0002 /* b1: Ch1 DMA Transfer End Int Enable */
#define SUDMAC_CH0ENDE	0x0001 /* b0: Ch0 DMA Transfer End Int Enable */

#define SUDMAC_DRV_NAME "sudmac"

static void sudmac_writel(struct sudmac_chan *sc, u32 data, u32 reg)
{
	iowrite32(data, sc->base + reg);
}

static u32 sudmac_readl(struct sudmac_chan *sc, u32 reg)
{
	return ioread32(sc->base + reg);
}

static bool sudmac_is_busy(struct sudmac_chan *sc)
{
	u32 den = sudmac_readl(sc, SUDMAC_CH0DEN + sc->offset);

	if (den)
		return true; /* working */

	return false; /* waiting */
}

static void sudmac_set_reg(struct sudmac_chan *sc, struct sudmac_regs *hw,
			   struct shdma_desc *sdesc)
{
	sudmac_writel(sc, sc->cfg, SUDMAC_CH0CFG + sc->offset);
	sudmac_writel(sc, hw->base_addr, SUDMAC_CH0BA + sc->offset);
	sudmac_writel(sc, hw->base_byte_count, SUDMAC_CH0BBC + sc->offset);
}

static void sudmac_start(struct sudmac_chan *sc)
{
	u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL);

	sudmac_writel(sc, dintctrl | sc->dint_end_bit, SUDMAC_DINTCTRL);
	sudmac_writel(sc, 1, SUDMAC_CH0DEN + sc->offset);
}

static void sudmac_start_xfer(struct shdma_chan *schan,
			      struct shdma_desc *sdesc)
{
	struct sudmac_chan *sc = to_chan(schan);
	struct sudmac_desc *sd = to_desc(sdesc);

	sudmac_set_reg(sc, &sd->hw, sdesc);
	sudmac_start(sc);
}

static bool sudmac_channel_busy(struct shdma_chan *schan)
{
	struct sudmac_chan *sc = to_chan(schan);

	return sudmac_is_busy(sc);
}

static void sudmac_setup_xfer(struct shdma_chan *schan, int slave_id)
{
}

static const struct sudmac_slave_config *sudmac_find_slave(
	struct sudmac_chan *sc, int slave_id)
{
	struct sudmac_device *sdev = to_sdev(sc);
	struct sudmac_pdata *pdata = sdev->pdata;
	const struct sudmac_slave_config *cfg;
	int i;

	for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
		if (cfg->slave_id == slave_id)
			return cfg;

	return NULL;
}

static int sudmac_set_slave(struct shdma_chan *schan, int slave_id,
			    dma_addr_t slave_addr, bool try)
{
	struct sudmac_chan *sc = to_chan(schan);
	const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id);

	if (!cfg)
		return -ENODEV;

	return 0;
}

static inline void sudmac_dma_halt(struct sudmac_chan *sc)
{
	u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL);

	sudmac_writel(sc, 0, SUDMAC_CH0DEN + sc->offset);
	sudmac_writel(sc, dintctrl & ~sc->dint_end_bit, SUDMAC_DINTCTRL);
	sudmac_writel(sc, sc->dint_end_bit, SUDMAC_DINTSTSCLR);
}

static int sudmac_desc_setup(struct shdma_chan *schan,
			     struct shdma_desc *sdesc,
			     dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct sudmac_chan *sc = to_chan(schan);
	struct sudmac_desc *sd = to_desc(sdesc);

	dev_dbg(sc->shdma_chan.dev, "%s: src=%pad, dst=%pad, len=%zu\n",
		__func__, &src, &dst, *len);

	if (*len > schan->max_xfer_len)
		*len = schan->max_xfer_len;

	if (dst)
		sd->hw.base_addr = dst;
	else if (src)
		sd->hw.base_addr = src;
	sd->hw.base_byte_count = *len;

	return 0;
}

static void sudmac_halt(struct shdma_chan *schan)
{
	struct sudmac_chan *sc = to_chan(schan);

	sudmac_dma_halt(sc);
}

static bool sudmac_chan_irq(struct shdma_chan *schan, int irq)
{
	struct sudmac_chan *sc = to_chan(schan);
	u32 dintsts = sudmac_readl(sc, SUDMAC_DINTSTS);

	if (!(dintsts & sc->dint_end_bit))
		return false;

	/* DMA stop */
	sudmac_dma_halt(sc);

	return true;
}

static size_t sudmac_get_partial(struct shdma_chan *schan,
				 struct shdma_desc *sdesc)
{
	struct sudmac_chan *sc = to_chan(schan);
	struct sudmac_desc *sd = to_desc(sdesc);
	u32 current_byte_count = sudmac_readl(sc, SUDMAC_CH0CBC + sc->offset);

	return sd->hw.base_byte_count - current_byte_count;
}

static bool sudmac_desc_completed(struct shdma_chan *schan,
				  struct shdma_desc *sdesc)
{
	struct sudmac_chan *sc = to_chan(schan);
	struct sudmac_desc *sd = to_desc(sdesc);
	u32 current_addr = sudmac_readl(sc, SUDMAC_CH0CA + sc->offset);

	return sd->hw.base_addr + sd->hw.base_byte_count == current_addr;
}

static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq,
			     unsigned long flags)
{
	struct shdma_dev *sdev = &su_dev->shdma_dev;
	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
	struct sudmac_chan *sc;
	struct shdma_chan *schan;
	int err;

	sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL);
	if (!sc)
		return -ENOMEM;

	schan = &sc->shdma_chan;
	schan->max_xfer_len = 64 * 1024 * 1024 - 1;

	shdma_chan_probe(sdev, schan, id);

	sc->base = su_dev->chan_reg;

	/* get platform_data */
	sc->offset = su_dev->pdata->channel->offset;
	if (su_dev->pdata->channel->config & SUDMAC_TX_BUFFER_MODE)
		sc->cfg |= SUDMAC_SENDBUFM;
	if (su_dev->pdata->channel->config & SUDMAC_RX_END_MODE)
		sc->cfg |= SUDMAC_RCVENDM;
	sc->cfg |= (su_dev->pdata->channel->wait << 4) & SUDMAC_LBA_WAIT;

	if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH0)
		sc->dint_end_bit |= SUDMAC_CH0ENDE;
	if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH1)
		sc->dint_end_bit |= SUDMAC_CH1ENDE;

	/* set up channel irq */
	if (pdev->id >= 0)
		snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d.%d",
			 pdev->id, id);
	else
		snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d", id);

	err = shdma_request_irq(schan, irq, flags, sc->dev_id);
	if (err) {
		dev_err(sdev->dma_dev.dev,
			"DMA channel %d request_irq failed %d\n", id, err);
		goto err_no_irq;
	}

	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	shdma_chan_remove(schan);
	return err;
}

static void sudmac_chan_remove(struct sudmac_device *su_dev)
{
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &su_dev->shdma_dev, i) {
		BUG_ON(!schan);

		shdma_chan_remove(schan);
	}
}

static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan)
{
	/* SUDMAC doesn't need the address */
	return 0;
}

static struct shdma_desc *sudmac_embedded_desc(void *buf, int i)
{
	return &((struct sudmac_desc *)buf)[i].shdma_desc;
}

static const struct shdma_ops sudmac_shdma_ops = {
	.desc_completed = sudmac_desc_completed,
	.halt_channel = sudmac_halt,
	.channel_busy = sudmac_channel_busy,
	.slave_addr = sudmac_slave_addr,
	.desc_setup = sudmac_desc_setup,
	.set_slave = sudmac_set_slave,
	.setup_xfer = sudmac_setup_xfer,
	.start_xfer = sudmac_start_xfer,
	.embedded_desc = sudmac_embedded_desc,
	.chan_irq = sudmac_chan_irq,
	.get_partial = sudmac_get_partial,
};

static int sudmac_probe(struct platform_device *pdev)
{
	struct sudmac_pdata *pdata = dev_get_platdata(&pdev->dev);
	int err, i;
	struct sudmac_device *su_dev;
	struct dma_device *dma_dev;
	struct resource *chan, *irq_res;

	/* get platform data */
	if (!pdata)
		return -ENODEV;

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq_res)
		return -ENODEV;

	err = -ENOMEM;
	su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device),
			      GFP_KERNEL);
	if (!su_dev)
		return err;

	dma_dev = &su_dev->shdma_dev.dma_dev;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	su_dev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
	if (IS_ERR(su_dev->chan_reg))
		return PTR_ERR(su_dev->chan_reg);

	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	su_dev->shdma_dev.ops = &sudmac_shdma_ops;
	su_dev->shdma_dev.desc_size = sizeof(struct sudmac_desc);
	err = shdma_init(&pdev->dev, &su_dev->shdma_dev, pdata->channel_num);
	if (err < 0)
		return err;

	/* platform data */
	su_dev->pdata = dev_get_platdata(&pdev->dev);

	platform_set_drvdata(pdev, su_dev);

	/* Create DMA Channel */
	for (i = 0; i < pdata->channel_num; i++) {
		err = sudmac_chan_probe(su_dev, i, irq_res->start, IRQF_SHARED);
		if (err)
			goto chan_probe_err;
	}

	err = dma_async_device_register(&su_dev->shdma_dev.dma_dev);
	if (err < 0)
		goto chan_probe_err;

	return err;

chan_probe_err:
	sudmac_chan_remove(su_dev);

	shdma_cleanup(&su_dev->shdma_dev);

	return err;
}

static int sudmac_remove(struct platform_device *pdev)
{
	struct sudmac_device *su_dev = platform_get_drvdata(pdev);
	struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev;

	dma_async_device_unregister(dma_dev);
	sudmac_chan_remove(su_dev);
	shdma_cleanup(&su_dev->shdma_dev);

	return 0;
}

static struct platform_driver sudmac_driver = {
	.driver		= {
		.name	= SUDMAC_DRV_NAME,
	},
	.probe		= sudmac_probe,
	.remove		= sudmac_remove,
};
module_platform_driver(sudmac_driver);

MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SUDMAC driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" SUDMAC_DRV_NAME);
@ -57,7 +57,7 @@ struct usb_dmac_desc {
|
|||
u32 residue;
|
||||
struct list_head node;
|
||||
dma_cookie_t done_cookie;
|
||||
struct usb_dmac_sg sg[0];
|
||||
struct usb_dmac_sg sg[];
|
||||
};
|
||||
|
||||
#define to_usb_dmac_desc(vd) container_of(vd, struct usb_dmac_desc, vd)
|
||||
|
@@ -636,9 +636,6 @@ static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg)
	struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
	struct of_phandle_args *dma_spec = arg;

-	if (dma_spec->np != chan->device->dev->of_node)
-		return false;
-
	/* USB-DMAC should be used with fixed usb controller's FIFO */
	if (uchan->index != dma_spec->args[0])
		return false;
@@ -659,7 +656,8 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

-	chan = dma_request_channel(mask, usb_dmac_chan_filter, dma_spec);
+	chan = __dma_request_channel(&mask, usb_dmac_chan_filter, dma_spec,
+				     ofdma->of_node);
	if (!chan)
		return NULL;
@@ -1365,7 +1365,6 @@ static int stm32_dma_probe(struct platform_device *pdev)

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
-		chan->irq = platform_get_irq(pdev, i);
+		ret = platform_get_irq(pdev, i);
		if (ret < 0) {
			if (ret != -EPROBE_DEFER)
@@ -295,8 +295,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
#ifdef CONFIG_PM
static int stm32_dmamux_runtime_suspend(struct device *dev)
{
-	struct platform_device *pdev =
-		container_of(dev, struct platform_device, dev);
+	struct platform_device *pdev = to_platform_device(dev);
	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);

	clk_disable_unprepare(stm32_dmamux->clk);
@@ -306,8 +305,7 @@ static int stm32_dmamux_runtime_suspend(struct device *dev)

static int stm32_dmamux_runtime_resume(struct device *dev)
{
-	struct platform_device *pdev =
-		container_of(dev, struct platform_device, dev);
+	struct platform_device *pdev = to_platform_device(dev);
	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
	int ret;
@@ -64,17 +64,20 @@
#define DMA_CHAN_LLI_ADDR	0x08

#define DMA_CHAN_CUR_CFG	0x0c
-#define DMA_CHAN_MAX_DRQ		0x1f
-#define DMA_CHAN_CFG_SRC_DRQ(x)		((x) & DMA_CHAN_MAX_DRQ)
-#define DMA_CHAN_CFG_SRC_IO_MODE	BIT(5)
-#define DMA_CHAN_CFG_SRC_LINEAR_MODE	(0 << 5)
+#define DMA_CHAN_MAX_DRQ_A31		0x1f
+#define DMA_CHAN_MAX_DRQ_H6		0x3f
+#define DMA_CHAN_CFG_SRC_DRQ_A31(x)	((x) & DMA_CHAN_MAX_DRQ_A31)
+#define DMA_CHAN_CFG_SRC_DRQ_H6(x)	((x) & DMA_CHAN_MAX_DRQ_H6)
+#define DMA_CHAN_CFG_SRC_MODE_A31(x)	(((x) & 0x1) << 5)
+#define DMA_CHAN_CFG_SRC_MODE_H6(x)	(((x) & 0x1) << 8)
#define DMA_CHAN_CFG_SRC_BURST_A31(x)	(((x) & 0x3) << 7)
#define DMA_CHAN_CFG_SRC_BURST_H3(x)	(((x) & 0x3) << 6)
#define DMA_CHAN_CFG_SRC_WIDTH(x)	(((x) & 0x3) << 9)

-#define DMA_CHAN_CFG_DST_DRQ(x)		(DMA_CHAN_CFG_SRC_DRQ(x) << 16)
-#define DMA_CHAN_CFG_DST_IO_MODE	(DMA_CHAN_CFG_SRC_IO_MODE << 16)
-#define DMA_CHAN_CFG_DST_LINEAR_MODE	(DMA_CHAN_CFG_SRC_LINEAR_MODE << 16)
+#define DMA_CHAN_CFG_DST_DRQ_A31(x)	(DMA_CHAN_CFG_SRC_DRQ_A31(x) << 16)
+#define DMA_CHAN_CFG_DST_DRQ_H6(x)	(DMA_CHAN_CFG_SRC_DRQ_H6(x) << 16)
+#define DMA_CHAN_CFG_DST_MODE_A31(x)	(DMA_CHAN_CFG_SRC_MODE_A31(x) << 16)
+#define DMA_CHAN_CFG_DST_MODE_H6(x)	(DMA_CHAN_CFG_SRC_MODE_H6(x) << 16)
#define DMA_CHAN_CFG_DST_BURST_A31(x)	(DMA_CHAN_CFG_SRC_BURST_A31(x) << 16)
#define DMA_CHAN_CFG_DST_BURST_H3(x)	(DMA_CHAN_CFG_SRC_BURST_H3(x) << 16)
#define DMA_CHAN_CFG_DST_WIDTH(x)	(DMA_CHAN_CFG_SRC_WIDTH(x) << 16)
@@ -94,6 +97,8 @@
#define LLI_LAST_ITEM	0xfffff800
#define NORMAL_WAIT	8
#define DRQ_SDRAM	1
+#define LINEAR_MODE	0
+#define IO_MODE		1

/* forward declaration */
struct sun6i_dma_dev;
@@ -121,10 +126,13 @@ struct sun6i_dma_config {
	 */
	void (*clock_autogate_enable)(struct sun6i_dma_dev *);
	void (*set_burst_length)(u32 *p_cfg, s8 src_burst, s8 dst_burst);
+	void (*set_drq)(u32 *p_cfg, s8 src_drq, s8 dst_drq);
+	void (*set_mode)(u32 *p_cfg, s8 src_mode, s8 dst_mode);
	u32 src_burst_lengths;
	u32 dst_burst_lengths;
	u32 src_addr_widths;
	u32 dst_addr_widths;
+	bool has_mbus_clk;
};

/*
@@ -178,6 +186,7 @@ struct sun6i_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	struct clk		*clk;
+	struct clk		*clk_mbus;
	int			irq;
	spinlock_t		lock;
	struct reset_control	*rstc;
@@ -305,6 +314,30 @@ static void sun6i_set_burst_length_h3(u32 *p_cfg, s8 src_burst, s8 dst_burst)
		  DMA_CHAN_CFG_DST_BURST_H3(dst_burst);
}

+static void sun6i_set_drq_a31(u32 *p_cfg, s8 src_drq, s8 dst_drq)
+{
+	*p_cfg |= DMA_CHAN_CFG_SRC_DRQ_A31(src_drq) |
+		  DMA_CHAN_CFG_DST_DRQ_A31(dst_drq);
+}
+
+static void sun6i_set_drq_h6(u32 *p_cfg, s8 src_drq, s8 dst_drq)
+{
+	*p_cfg |= DMA_CHAN_CFG_SRC_DRQ_H6(src_drq) |
+		  DMA_CHAN_CFG_DST_DRQ_H6(dst_drq);
+}
+
+static void sun6i_set_mode_a31(u32 *p_cfg, s8 src_mode, s8 dst_mode)
+{
+	*p_cfg |= DMA_CHAN_CFG_SRC_MODE_A31(src_mode) |
+		  DMA_CHAN_CFG_DST_MODE_A31(dst_mode);
+}
+
+static void sun6i_set_mode_h6(u32 *p_cfg, s8 src_mode, s8 dst_mode)
+{
+	*p_cfg |= DMA_CHAN_CFG_SRC_MODE_H6(src_mode) |
+		  DMA_CHAN_CFG_DST_MODE_H6(dst_mode);
+}
+
static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan)
{
	struct sun6i_desc *txd = pchan->desc;
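These helpers exist because the H6 kept the channel-configuration register but rearranged its fields: per the macros above, the DRQ field widens from 5 to 6 bits and the source mode bit moves from bit 5 (A31) to bit 8 (H6), so one shared macro set can no longer serve every SoC. A standalone sketch (userspace C, with the bit layout re-derived from the definitions above) showing the same (drq, mode) pair encoding differently per variant:

	#include <stdint.h>
	#include <stdio.h>

	#define SRC_DRQ_A31(x)  ((uint32_t)(x) & 0x1f)
	#define SRC_DRQ_H6(x)   ((uint32_t)(x) & 0x3f)
	#define SRC_MODE_A31(x) (((uint32_t)(x) & 0x1) << 5)
	#define SRC_MODE_H6(x)  (((uint32_t)(x) & 0x1) << 8)

	int main(void)
	{
		uint32_t a31 = SRC_DRQ_A31(1) | SRC_MODE_A31(1);
		uint32_t h6  = SRC_DRQ_H6(1)  | SRC_MODE_H6(1);

		printf("A31: 0x%08x\n", a31); /* 0x00000021: mode at bit 5 */
		printf("H6:  0x%08x\n", h6);  /* 0x00000101: mode at bit 8 */
		return 0;
	}

Routing the encoding through per-variant set_drq/set_mode callbacks in sun6i_dma_config keeps the prep paths below identical across SoCs.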
@@ -628,14 +661,12 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(

	burst = convert_burst(8);
	width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES);
-	v_lli->cfg = DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
-		DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
-		DMA_CHAN_CFG_DST_LINEAR_MODE |
-		DMA_CHAN_CFG_SRC_LINEAR_MODE |
-		DMA_CHAN_CFG_SRC_WIDTH(width) |
+	v_lli->cfg = DMA_CHAN_CFG_SRC_WIDTH(width) |
		DMA_CHAN_CFG_DST_WIDTH(width);

	sdev->cfg->set_burst_length(&v_lli->cfg, burst, burst);
+	sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, DRQ_SDRAM);
+	sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, LINEAR_MODE);

	sun6i_dma_lli_add(NULL, v_lli, p_lli, txd);
@@ -687,11 +718,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
		if (dir == DMA_MEM_TO_DEV) {
			v_lli->src = sg_dma_address(sg);
			v_lli->dst = sconfig->dst_addr;
-			v_lli->cfg = lli_cfg |
-				DMA_CHAN_CFG_DST_IO_MODE |
-				DMA_CHAN_CFG_SRC_LINEAR_MODE |
-				DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
-				DMA_CHAN_CFG_DST_DRQ(vchan->port);
+			v_lli->cfg = lli_cfg;
+			sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port);
+			sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE);

			dev_dbg(chan2dev(chan),
				"%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
@@ -702,11 +731,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
		} else {
			v_lli->src = sconfig->src_addr;
			v_lli->dst = sg_dma_address(sg);
-			v_lli->cfg = lli_cfg |
-				DMA_CHAN_CFG_DST_LINEAR_MODE |
-				DMA_CHAN_CFG_SRC_IO_MODE |
-				DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
-				DMA_CHAN_CFG_SRC_DRQ(vchan->port);
+			v_lli->cfg = lli_cfg;
+			sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM);
+			sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE);

			dev_dbg(chan2dev(chan),
				"%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
@@ -772,19 +799,15 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic(
		if (dir == DMA_MEM_TO_DEV) {
			v_lli->src = buf_addr + period_len * i;
			v_lli->dst = sconfig->dst_addr;
-			v_lli->cfg = lli_cfg |
-				DMA_CHAN_CFG_DST_IO_MODE |
-				DMA_CHAN_CFG_SRC_LINEAR_MODE |
-				DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
-				DMA_CHAN_CFG_DST_DRQ(vchan->port);
+			v_lli->cfg = lli_cfg;
+			sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port);
+			sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE);
		} else {
			v_lli->src = sconfig->src_addr;
			v_lli->dst = buf_addr + period_len * i;
-			v_lli->cfg = lli_cfg |
-				DMA_CHAN_CFG_DST_LINEAR_MODE |
-				DMA_CHAN_CFG_SRC_IO_MODE |
-				DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
-				DMA_CHAN_CFG_SRC_DRQ(vchan->port);
+			v_lli->cfg = lli_cfg;
+			sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM);
+			sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE);
		}

		prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd);
@@ -1049,6 +1072,8 @@ static struct sun6i_dma_config sun6i_a31_dma_cfg = {
	.nr_max_requests = 30,
	.nr_max_vchans   = 53,
	.set_burst_length = sun6i_set_burst_length_a31,
+	.set_drq          = sun6i_set_drq_a31,
+	.set_mode         = sun6i_set_mode_a31,
	.src_burst_lengths = BIT(1) | BIT(8),
	.dst_burst_lengths = BIT(1) | BIT(8),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
@@ -1070,6 +1095,8 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = {
	.nr_max_vchans   = 37,
	.clock_autogate_enable = sun6i_enable_clock_autogate_a23,
	.set_burst_length = sun6i_set_burst_length_a31,
+	.set_drq          = sun6i_set_drq_a31,
+	.set_mode         = sun6i_set_mode_a31,
	.src_burst_lengths = BIT(1) | BIT(8),
	.dst_burst_lengths = BIT(1) | BIT(8),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
@@ -1086,6 +1113,8 @@ static struct sun6i_dma_config sun8i_a83t_dma_cfg = {
	.nr_max_vchans   = 39,
	.clock_autogate_enable = sun6i_enable_clock_autogate_a23,
	.set_burst_length = sun6i_set_burst_length_a31,
+	.set_drq          = sun6i_set_drq_a31,
+	.set_mode         = sun6i_set_mode_a31,
	.src_burst_lengths = BIT(1) | BIT(8),
	.dst_burst_lengths = BIT(1) | BIT(8),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
@@ -1109,6 +1138,8 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = {
	.nr_max_vchans   = 34,
	.clock_autogate_enable = sun6i_enable_clock_autogate_h3,
	.set_burst_length = sun6i_set_burst_length_h3,
+	.set_drq          = sun6i_set_drq_a31,
+	.set_mode         = sun6i_set_mode_a31,
	.src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
	.dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
@@ -1128,6 +1159,8 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = {
static struct sun6i_dma_config sun50i_a64_dma_cfg = {
	.clock_autogate_enable = sun6i_enable_clock_autogate_h3,
	.set_burst_length = sun6i_set_burst_length_h3,
+	.set_drq          = sun6i_set_drq_a31,
+	.set_mode         = sun6i_set_mode_a31,
	.src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
	.dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
@@ -1140,6 +1173,28 @@ static struct sun6i_dma_config sun50i_a64_dma_cfg = {
			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
};

+/*
+ * The H6 binding uses the number of dma channels from the
+ * device tree node.
+ */
+static struct sun6i_dma_config sun50i_h6_dma_cfg = {
+	.clock_autogate_enable = sun6i_enable_clock_autogate_h3,
+	.set_burst_length = sun6i_set_burst_length_h3,
+	.set_drq          = sun6i_set_drq_h6,
+	.set_mode         = sun6i_set_mode_h6,
+	.src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
+	.dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
+	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
+	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
+	.has_mbus_clk = true,
+};
+
/*
 * The V3s have only 8 physical channels, a maximum DRQ port id of 23,
 * and a total of 24 usable source and destination endpoints.
@@ -1151,6 +1206,8 @@ static struct sun6i_dma_config sun8i_v3s_dma_cfg = {
	.nr_max_vchans   = 24,
	.clock_autogate_enable = sun6i_enable_clock_autogate_a23,
	.set_burst_length = sun6i_set_burst_length_a31,
+	.set_drq          = sun6i_set_drq_a31,
+	.set_mode         = sun6i_set_mode_a31,
	.src_burst_lengths = BIT(1) | BIT(8),
	.dst_burst_lengths = BIT(1) | BIT(8),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
@@ -1168,6 +1225,7 @@ static const struct of_device_id sun6i_dma_match[] = {
	{ .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg },
	{ .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg },
	{ .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg },
+	{ .compatible = "allwinner,sun50i-h6-dma", .data = &sun50i_h6_dma_cfg },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun6i_dma_match);
@@ -1204,6 +1262,14 @@ static int sun6i_dma_probe(struct platform_device *pdev)
		return PTR_ERR(sdc->clk);
	}

+	if (sdc->cfg->has_mbus_clk) {
+		sdc->clk_mbus = devm_clk_get(&pdev->dev, "mbus");
+		if (IS_ERR(sdc->clk_mbus)) {
+			dev_err(&pdev->dev, "No mbus clock specified\n");
+			return PTR_ERR(sdc->clk_mbus);
+		}
+	}
+
	sdc->rstc = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(sdc->rstc)) {
		dev_err(&pdev->dev, "No reset controller specified\n");
@@ -1258,8 +1324,8 @@ static int sun6i_dma_probe(struct platform_device *pdev)
	ret = of_property_read_u32(np, "dma-requests", &sdc->max_request);
	if (ret && !sdc->max_request) {
		dev_info(&pdev->dev, "Missing dma-requests, using %u.\n",
-			 DMA_CHAN_MAX_DRQ);
-		sdc->max_request = DMA_CHAN_MAX_DRQ;
+			 DMA_CHAN_MAX_DRQ_A31);
+		sdc->max_request = DMA_CHAN_MAX_DRQ_A31;
	}

	/*
@@ -1308,11 +1374,19 @@ static int sun6i_dma_probe(struct platform_device *pdev)
		goto err_reset_assert;
	}

+	if (sdc->cfg->has_mbus_clk) {
+		ret = clk_prepare_enable(sdc->clk_mbus);
+		if (ret) {
+			dev_err(&pdev->dev, "Couldn't enable mbus clock\n");
+			goto err_clk_disable;
+		}
+	}
+
	ret = devm_request_irq(&pdev->dev, sdc->irq, sun6i_dma_interrupt, 0,
			       dev_name(&pdev->dev), sdc);
	if (ret) {
		dev_err(&pdev->dev, "Cannot request IRQ\n");
-		goto err_clk_disable;
+		goto err_mbus_clk_disable;
	}

	ret = dma_async_device_register(&sdc->slave);
@@ -1337,6 +1411,8 @@ static int sun6i_dma_probe(struct platform_device *pdev)
	dma_async_device_unregister(&sdc->slave);
err_irq_disable:
	sun6i_kill_tasklet(sdc);
+err_mbus_clk_disable:
+	clk_disable_unprepare(sdc->clk_mbus);
err_clk_disable:
	clk_disable_unprepare(sdc->clk);
err_reset_assert:
@@ -1355,6 +1431,7 @@ static int sun6i_dma_remove(struct platform_device *pdev)

	sun6i_kill_tasklet(sdc);

+	clk_disable_unprepare(sdc->clk_mbus);
	clk_disable_unprepare(sdc->clk);
	reset_control_assert(sdc->rstc);
@@ -977,8 +977,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	}

-	if (flags & DMA_PREP_INTERRUPT)
+	if (flags & DMA_PREP_INTERRUPT) {
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
+	} else {
+		WARN_ON_ONCE(1);
+		return NULL;
+	}

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
@@ -1120,8 +1124,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	}

-	if (flags & DMA_PREP_INTERRUPT)
+	if (flags & DMA_PREP_INTERRUPT) {
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
+	} else {
+		WARN_ON_ONCE(1);
+		return NULL;
+	}

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
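With these two hunks the Tegra APB DMA prep hooks refuse any descriptor that does not request a completion interrupt, since the driver relies on the end-of-conversion interrupt to make progress. A hedged sketch of how a dmaengine client normally sets the flag (standard client API; chan, sgl, nents, my_done_cb and my_ctx are assumed to be set up elsewhere):

	struct dma_async_tx_descriptor *txd;

	/* DMA_PREP_INTERRUPT asks the engine to fire the completion
	 * callback; this driver now returns NULL without it. */
	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		return -EIO;
	txd->callback = my_done_cb;
	txd->callback_param = my_ctx;
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);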
@@ -98,7 +98,7 @@ static void vchan_complete(unsigned long arg)
	}
	spin_unlock_irq(&vc->lock);

-	dmaengine_desc_callback_invoke(&cb, NULL);
+	dmaengine_desc_callback_invoke(&cb, &vd->tx_result);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		dmaengine_desc_get_callback(&vd->tx, &cb);
@@ -106,7 +106,7 @@ static void vchan_complete(unsigned long arg)
		list_del(&vd->node);
		vchan_vdesc_fini(vd);

-		dmaengine_desc_callback_invoke(&cb, NULL);
+		dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
	}
}
@@ -14,6 +14,7 @@

struct virt_dma_desc {
	struct dma_async_tx_descriptor tx;
+	struct dmaengine_result tx_result;
	/* protected by vc.lock */
	struct list_head node;
};
@@ -62,6 +63,9 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
	vd->tx.tx_submit = vchan_tx_submit;
	vd->tx.desc_free = vchan_tx_desc_free;

+	vd->tx_result.result = DMA_TRANS_NOERROR;
+	vd->tx_result.residue = 0;
+
	spin_lock_irqsave(&vc->lock, flags);
	list_add_tail(&vd->node, &vc->desc_allocated);
	spin_unlock_irqrestore(&vc->lock, flags);
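With a dmaengine_result embedded in every virtual descriptor, drivers built on virt-dma can report per-descriptor status and residue to the client callback instead of always passing NULL. A hedged sketch of how a driver's interrupt handler might use it (vd and bytes_left are hypothetical; the descriptor is assumed to embed a struct virt_dma_desc):

	/* Report an aborted transfer with its leftover byte count before
	 * completing the descriptor; vchan_complete() then hands
	 * &vd->tx_result to dmaengine_desc_callback_invoke(). */
	vd->tx_result.result = DMA_TRANS_ABORTED;
	vd->tx_result.residue = bytes_left;
	vchan_cookie_complete(vd);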
@@ -1095,7 +1095,7 @@ static void xilinx_dma_start(struct xilinx_dma_chan *chan)
static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_config *config = &chan->config;
-	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
+	struct xilinx_dma_tx_descriptor *desc;
	u32 reg, j;
	struct xilinx_vdma_tx_segment *segment, *last = NULL;
	int i = 0;
@@ -1112,8 +1112,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)

	desc = list_first_entry(&chan->pending_list,
				struct xilinx_dma_tx_descriptor, node);
-	tail_desc = list_last_entry(&chan->pending_list,
-				    struct xilinx_dma_tx_descriptor, node);

	/* Configure the hardware using info in the config structure */
	if (chan->has_vflip) {
@@ -793,7 +793,7 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) },
+	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
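PCI_DEVICE_DATA() builds the match entry from the PCI_VENDOR_ID_/PCI_DEVICE_ID_ name stems (here resolving to the new PCI_DEVICE_ID_SYNOPSYS_EDDA added to pci_ids.h below) and sets .driver_data in the same initializer. The replacement is roughly equivalent to:

	/* Approximate expansion of PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL): */
	{ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, PCI_DEVICE_ID_SYNOPSYS_EDDA),
	  .driver_data = (kernel_ulong_t)NULL },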
@@ -99,7 +99,7 @@ static int tegra20_fuse_probe(struct tegra_fuse *fuse)
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

-	fuse->apbdma.chan = __dma_request_channel(&mask, dma_filter, NULL);
+	fuse->apbdma.chan = dma_request_channel(mask, dma_filter, NULL);
	if (!fuse->apbdma.chan)
		return -EPROBE_DEFER;
include/linux/dma/edma.h (new file, 47 lines)

@@ -0,0 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA core driver
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#ifndef _DW_EDMA_H
#define _DW_EDMA_H

#include <linux/device.h>
#include <linux/dmaengine.h>

struct dw_edma;

/**
 * struct dw_edma_chip - representation of DesignWare eDMA controller hardware
 * @dev:	struct device of the eDMA controller
 * @id:		instance ID
 * @irq:	irq line
 * @dw:		struct dw_edma that is filled by dw_edma_probe()
 */
struct dw_edma_chip {
	struct device		*dev;
	int			id;
	int			irq;
	struct dw_edma		*dw;
};

/* Export to the platform drivers */
#if IS_ENABLED(CONFIG_DW_EDMA)
int dw_edma_probe(struct dw_edma_chip *chip);
int dw_edma_remove(struct dw_edma_chip *chip);
#else
static inline int dw_edma_probe(struct dw_edma_chip *chip)
{
	return -ENODEV;
}

static inline int dw_edma_remove(struct dw_edma_chip *chip)
{
	return 0;
}
#endif /* CONFIG_DW_EDMA */

#endif /* _DW_EDMA_H */
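This header is the whole contract between the new eDMA core and its bus glue: glue code fills in a dw_edma_chip and hands it to dw_edma_probe(), which discovers the channels and registers the DMA device. A hedged sketch of hypothetical glue code (the real v5.3 glue lives in drivers/dma/dw-edma/dw-edma-pcie.c; function name and error handling here are illustrative only):

	static int my_edma_glue_probe(struct pci_dev *pdev,
				      const struct pci_device_id *id)
	{
		struct dw_edma_chip *chip;

		chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->dev = &pdev->dev;
		chip->id = pdev->devfn;
		chip->irq = pdev->irq;

		/* The core fills chip->dw and registers the DMA device. */
		return dw_edma_probe(chip);
	}

When CONFIG_DW_EDMA is disabled, the stub returns -ENODEV so glue drivers fail probe cleanly instead of needing their own ifdefs.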
@@ -1302,7 +1302,8 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
-				       dma_filter_fn fn, void *fn_param);
+				       dma_filter_fn fn, void *fn_param,
+				       struct device_node *np);
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);

struct dma_chan *dma_request_chan(struct device *dev, const char *name);

@@ -1327,7 +1328,9 @@
{
}
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
-						     dma_filter_fn fn, void *fn_param)
+						     dma_filter_fn fn,
+						     void *fn_param,
+						     struct device_node *np)
{
	return NULL;
}

@@ -1399,7 +1402,8 @@ void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
-#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
+#define dma_request_channel(mask, x, y) \
+	__dma_request_channel(&(mask), x, y, NULL)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
	__dma_request_slave_channel_compat(&(mask), x, y, dev, name)

@@ -1417,6 +1421,6 @@ static inline struct dma_chan
	if (!fn || !fn_param)
		return NULL;

-	return __dma_request_channel(mask, fn, fn_param);
+	return __dma_request_channel(mask, fn, fn_param, NULL);
}
#endif /* DMAENGINE_H */
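The new struct device_node argument lets the dmaengine core perform the DT node match itself; that is what allowed usb_dmac_chan_filter() above to drop its open-coded dma_spec->np comparison, and legacy callers keep working because dma_request_channel() now passes NULL. A hedged migration sketch for a driver's of_xlate handler (my_filter and dma_spec are assumed to exist):

	/* Before v5.3: the filter callback had to reject channels from
	 * other controllers by comparing OF nodes itself. */
	chan = dma_request_channel(mask, my_filter, dma_spec);

	/* v5.3 onward: pass the controller's node and the core only
	 * offers matching channels, so my_filter can drop its np check. */
	chan = __dma_request_channel(&mask, my_filter, dma_spec,
				     ofdma->of_node);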
include/linux/fpga/adi-axi-common.h (new file, 19 lines)

@@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Analog Devices AXI common registers & definitions
 *
 * Copyright 2019 Analog Devices Inc.
 *
 * https://wiki.analog.com/resources/fpga/docs/axi_ip
 * https://wiki.analog.com/resources/fpga/docs/hdl/regmap
 */

#ifndef ADI_AXI_COMMON_H_
#define ADI_AXI_COMMON_H_

#define ADI_AXI_REG_VERSION	0x0000

#define ADI_AXI_PCORE_VER(major, minor, patch)	\
	(((major) << 16) | ((minor) << 8) | (patch))

#endif /* ADI_AXI_COMMON_H_ */
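The version register packs major, minor and patch into one word (major in bits 31:16, minor in 15:8, patch in 7:0), so a driver can compare the FPGA pcore revision against the version it was written for. A hedged usage sketch (base, dev and the expected 1.0.0 revision are assumptions, not from this header):

	u32 version = readl(base + ADI_AXI_REG_VERSION);

	/* Refuse to bind to an incompatible major revision of the pcore. */
	if ((version >> 16) != (ADI_AXI_PCORE_VER(1, 0, 0) >> 16)) {
		dev_err(dev, "unsupported pcore version %u.%u.%u\n",
			version >> 16, (version >> 8) & 0xff, version & 0xff);
		return -ENODEV;
	}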
@@ -2367,6 +2367,7 @@
#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3		0xabcd
#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI	0xabce
#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31	0xabcf
+#define PCI_DEVICE_ID_SYNOPSYS_EDDA		0xedda

#define PCI_VENDOR_ID_USR		0x16ec
@@ -52,7 +52,6 @@ struct imx_dma_data {
	int dma_request2; /* secondary DMA request line */
	enum sdma_peripheral_type peripheral_type;
	int priority;
-	struct device_node *of_node;
};

static inline int imx_dma_is_ipu(struct dma_chan *chan)
@@ -1,49 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Header for the SUDMAC driver
 *
 * Copyright (C) 2013 Renesas Solutions Corp.
 */
#ifndef SUDMAC_H
#define SUDMAC_H

#include <linux/dmaengine.h>
#include <linux/shdma-base.h>
#include <linux/types.h>

/* Used by slave DMA clients to request DMA to/from a specific peripheral */
struct sudmac_slave {
	struct shdma_slave shdma_slave;	/* Set by the platform */
};

/*
 * Supplied by platforms to specify, how a DMA channel has to be configured for
 * a certain peripheral
 */
struct sudmac_slave_config {
	int slave_id;
};

struct sudmac_channel {
	unsigned long offset;
	unsigned long config;
	unsigned long wait;		/* The configurable range is 0 to 3 */
	unsigned long dint_end_bit;
};

struct sudmac_pdata {
	const struct sudmac_slave_config *slave;
	int slave_num;
	const struct sudmac_channel *channel;
	int channel_num;
};

/* Definitions for the sudmac_channel.config */
#define SUDMAC_TX_BUFFER_MODE	BIT(0)
#define SUDMAC_RX_END_MODE	BIT(1)

/* Definitions for the sudmac_channel.dint_end_bit */
#define SUDMAC_DMA_BIT_CH0	BIT(0)
#define SUDMAC_DMA_BIT_CH1	BIT(1)

#endif