dmaengine updates for v6.19
Updates:
- Renesas driver conversions to RUNTIME_PM_OPS() and related PM macros
- Dropping the unused module alias from a bunch of drivers
- GPI Block event interrupt support in the Qualcomm GPI driver, with matching
  updates to the I2C driver
-----BEGIN PGP SIGNATURE-----
iQIzBAABCgAdFiEE+vs47OPLdNbVcHzyfBQHDyUjg0cFAmk2fnQACgkQfBQHDyUj
g0e9wxAAm+fgx8YT3P8l/8+LWaIpvtuz1qXaiMFK2U9KFWUxOXusIWhIR3FwSvbz
q9MUaFrBrG2qB5Vgj2fzI0mj7kx3oXRj3NPWwLde1CJL/xi/EQYcDyR7Yd4aJyyN
YjLh3XV4FlGrFRgFvK6CIQ7duEH3akdZzmjZi9LjjDtqeQKqxrBepcrQkqLaTsMI
hNt9ZeRrQlJsnzNzXe6B5asra6DI/70mXAfc3xvb/foY84xWC19e81QNDHRZtx10
SiWuZeDTT00zAg9G26j8W/ccFKpQoiRTIpKI4zPJicwsL84/55+12ZiEKWMrEAbT
4TCMKPfRIEhHTvZg7mJ5gNmxlQ3ULYs6UK9JdiF0hOvJ2Jg6T3/ah97WaYSgoO4K
8eq/tDk2sM5UflR5MNyt3mwLcY/DEZyUAfJgpBs1t+RdFfvdxBQ3zT7H7cuHug11
6qdRWjjw4USm4GgG3iAynOVwVmSRoAfEB6XVQ74R63ehb0fxK4H0Zm2razgTlXLt
TkJ9cAJp2L/7nKc8xskMlynM6sopXHJ9GHAkdp9t1OPQ6dFRWlNmv6tgqFUrJNz0
c929A5tBxWkdJG4xLNAsZ/rZf/w74DYtUe6Xc8JOfXa2YygQU+1cx2zAiRLdm53D
v5qaNPruB+DolbXz5NAc5jNR2IH+XlNmzA7GAvLR7WZd4p2uukc=
=SEQh
-----END PGP SIGNATURE-----
Merge tag 'dmaengine-6.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine
Pull dmaengine updates from Vinod Koul:
- Renesas driver conversions to RUNTIME_PM_OPS() and related PM macros
- Dropping the unused module alias from a bunch of drivers
- GPI Block event interrupt support in the Qualcomm GPI driver, with
  matching updates to the I2C driver
* tag 'dmaengine-6.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (23 commits)
dt-bindings: dma: xilinx: Simplify dma-coherent property
dmaengine: fsl-edma: configure tcd attr with separate src and dst settings
dmaengine: st_fdma: drop unused module alias
dmaengine: bcm2835: enable compile testing
dmaengine: tegra210-adma: drop unused module alias
dmaengine: sprd: drop unused module alias
dmaengine: mmp_tdma: drop unnecessary OF node check in remove
dmaengine: mmp_tdma: drop unused module alias
dmaengine: k3dma: drop unused module alias
dmaengine: fsl-qdma: drop unused module alias
dmaengine: fsl-edma: drop unused module alias
dmaengine: dw: drop unused module alias
dmaengine: bcm2835: drop unused module alias
dmaengine: at_hdmac: add COMPILE_TEST support
dmaengine: at_hdmac: fix formats under 64-bit
i2c: i2c-qcom-geni: Add Block event interrupt support
dmaengine: qcom: gpi: Add GPI Block event interrupt support
dmaengine: idxd: drain ATS translations when disabling WQ
dmaengine: sh: Kconfig: Drop ARCH_R7S72100/ARCH_RZG2L dependency
dmaengine: rcar-dmac: Convert to NOIRQ_SYSTEM_SLEEP/RUNTIME_PM_OPS()
...
@@ -59,8 +59,7 @@ properties:
   power-domains:
     maxItems: 1
 
-  dma-coherent:
-    description: present if dma operations are coherent
+  dma-coherent: true
 
 required:
   - "#dma-cells"
@@ -102,7 +102,7 @@ config ARM_DMA350
 
 config AT_HDMAC
     tristate "Atmel AHB DMA support"
-    depends on ARCH_AT91
+    depends on ARCH_AT91 || COMPILE_TEST
     select DMA_ENGINE
     select DMA_VIRTUAL_CHANNELS
     help

@@ -143,7 +143,7 @@ config BCM_SBA_RAID
 
 config DMA_BCM2835
     tristate "BCM2835 DMA engine support"
-    depends on ARCH_BCM2835
+    depends on ARCH_BCM2835 || COMPILE_TEST
     select DMA_ENGINE
     select DMA_VIRTUAL_CHANNELS
 
@@ -887,7 +887,7 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
     first = xt->sgl;
 
     dev_info(chan2dev(chan),
-         "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
+         "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
         __func__, &xt->src_start, &xt->dst_start, xt->numf,
         xt->frame_size, flags);
 

@@ -1174,7 +1174,7 @@ atc_prep_dma_memset_sg(struct dma_chan *chan,
     int i;
     int ret;
 
-    dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%zx f0x%lx\n", __func__,
+    dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%x f0x%lx\n", __func__,
          value, sg_len, flags);
 
     if (unlikely(!sgl || !sg_len)) {

@@ -1503,7 +1503,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
     unsigned int periods = buf_len / period_len;
     unsigned int i;
 
-    dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
+    dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%zu/%zu)\n",
         direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
         &buf_addr,
         periods, buf_len, period_len);
@@ -1060,7 +1060,6 @@ static struct platform_driver bcm2835_dma_driver = {
 
 module_platform_driver(bcm2835_dma_driver);
 
-MODULE_ALIAS("platform:bcm2835-dma");
 MODULE_DESCRIPTION("BCM2835 DMA engine driver");
 MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
 MODULE_LICENSE("GPL");
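For context on the MODULE_ALIAS removals in this series (here and in several hunks below): these platform drivers are instantiated only from devicetree, where module autoloading is driven by the aliases that MODULE_DEVICE_TABLE() generates from the compatible strings, so a hand-written MODULE_ALIAS("platform:...") goes unused. A minimal sketch of the pattern that remains (hypothetical driver and compatible string, not taken from this series):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id example_dma_of_match[] = {
    { .compatible = "vendor,example-dma" },     /* hypothetical compatible */
    { }
};
/* Emits the of:N*T*Cvendor,example-dma modalias used for autoloading */
MODULE_DEVICE_TABLE(of, example_dma_of_match);

static struct platform_driver example_dma_driver = {
    .driver = {
        .name = "example-dma",
        .of_match_table = example_dma_of_match,
    },
};
module_platform_driver(example_dma_driver);

MODULE_DESCRIPTION("Module alias sketch");
MODULE_LICENSE("GPL");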
@@ -21,8 +21,6 @@
 
 #include "internal.h"
 
-#define DRV_NAME    "dw_dmac"
-
 static int dw_probe(struct platform_device *pdev)
 {
     const struct dw_dma_chip_pdata *match;

@@ -190,7 +188,7 @@ static struct platform_driver dw_driver = {
     .remove = dw_remove,
     .shutdown = dw_shutdown,
     .driver = {
-        .name = DRV_NAME,
+        .name = "dw_dmac",
         .pm = pm_sleep_ptr(&dw_dev_pm_ops),
         .of_match_table = of_match_ptr(dw_dma_of_id_table),
         .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),

@@ -211,4 +209,3 @@ module_exit(dw_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
-MODULE_ALIAS("platform:" DRV_NAME);
@@ -206,15 +206,19 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
         mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
 }
 
-static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
+static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth src_addr_width,
+                                          enum dma_slave_buswidth dst_addr_width)
 {
-    u32 val;
+    u32 src_val, dst_val;
 
-    if (addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
-        addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+    if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+        src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+    if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+        dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 
-    val = ffs(addr_width) - 1;
-    return val | (val << 8);
+    src_val = ffs(src_addr_width) - 1;
+    dst_val = ffs(dst_addr_width) - 1;
+    return dst_val | (src_val << 8);
 }
 
 void fsl_edma_free_desc(struct virt_dma_desc *vdesc)

@@ -612,13 +616,19 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
 
     dma_buf_next = dma_addr;
     if (direction == DMA_MEM_TO_DEV) {
+        if (!fsl_chan->cfg.src_addr_width)
+            fsl_chan->cfg.src_addr_width = fsl_chan->cfg.dst_addr_width;
         fsl_chan->attr =
-            fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
+            fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
+                                  fsl_chan->cfg.dst_addr_width);
         nbytes = fsl_chan->cfg.dst_addr_width *
             fsl_chan->cfg.dst_maxburst;
     } else {
+        if (!fsl_chan->cfg.dst_addr_width)
+            fsl_chan->cfg.dst_addr_width = fsl_chan->cfg.src_addr_width;
         fsl_chan->attr =
-            fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
+            fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
+                                  fsl_chan->cfg.dst_addr_width);
         nbytes = fsl_chan->cfg.src_addr_width *
             fsl_chan->cfg.src_maxburst;
     }

@@ -689,13 +699,19 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
     fsl_desc->dirn = direction;
 
     if (direction == DMA_MEM_TO_DEV) {
+        if (!fsl_chan->cfg.src_addr_width)
+            fsl_chan->cfg.src_addr_width = fsl_chan->cfg.dst_addr_width;
         fsl_chan->attr =
-            fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
+            fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
+                                  fsl_chan->cfg.dst_addr_width);
         nbytes = fsl_chan->cfg.dst_addr_width *
             fsl_chan->cfg.dst_maxburst;
     } else {
+        if (!fsl_chan->cfg.dst_addr_width)
+            fsl_chan->cfg.dst_addr_width = fsl_chan->cfg.src_addr_width;
         fsl_chan->attr =
-            fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
+            fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
+                                  fsl_chan->cfg.dst_addr_width);
         nbytes = fsl_chan->cfg.src_addr_width *
             fsl_chan->cfg.src_maxburst;
     }

@@ -766,6 +782,10 @@ struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
 {
     struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
     struct fsl_edma_desc *fsl_desc;
+    u32 src_bus_width, dst_bus_width;
+
+    src_bus_width = min_t(u32, DMA_SLAVE_BUSWIDTH_32_BYTES, 1 << (ffs(dma_src) - 1));
+    dst_bus_width = min_t(u32, DMA_SLAVE_BUSWIDTH_32_BYTES, 1 << (ffs(dma_dst) - 1));
 
     fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1);
     if (!fsl_desc)

@@ -778,8 +798,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
 
     /* To match with copy_align and max_seg_size so 1 tcd is enough */
     fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
-            fsl_edma_get_tcd_attr(DMA_SLAVE_BUSWIDTH_32_BYTES),
-            32, len, 0, 1, 1, 32, 0, true, true, false);
+            fsl_edma_get_tcd_attr(src_bus_width, dst_bus_width),
+            src_bus_width, len, 0, 1, 1, dst_bus_width, 0, true,
+            true, false);
 
     return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
 }
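The fsl_edma_get_tcd_attr() change above splits the TCD ATTR encoding so the source and destination port sizes no longer have to match. A stand-alone illustration of the arithmetic (user-space C sketch mirroring the helper, not the driver code itself): bus widths are powers of two, so ffs(width) - 1 yields the eDMA size code, and the helper now packs the source size into the high byte and the destination size into the low byte.

#include <stdio.h>
#include <strings.h>            /* ffs() */

static unsigned int tcd_attr(unsigned int src_width, unsigned int dst_width)
{
    unsigned int src_val = ffs(src_width) - 1;
    unsigned int dst_val = ffs(dst_width) - 1;

    return dst_val | (src_val << 8);
}

int main(void)
{
    /* 4-byte source, 2-byte destination -> 0x0201; the old single-width
     * helper could only return symmetric values such as 0x0202. */
    printf("attr = 0x%04x\n", tcd_attr(4, 2));
    return 0;
}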
@@ -999,6 +999,5 @@ static void __exit fsl_edma_exit(void)
 }
 module_exit(fsl_edma_exit);
 
-MODULE_ALIAS("platform:fsl-edma");
 MODULE_DESCRIPTION("Freescale eDMA engine driver");
 MODULE_LICENSE("GPL v2");
@@ -1296,6 +1296,5 @@ static struct platform_driver fsl_qdma_driver = {
 
 module_platform_driver(fsl_qdma_driver);
 
-MODULE_ALIAS("platform:fsl-qdma");
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");
@@ -16,6 +16,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
               u32 *status);
 static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
 static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
+static int idxd_wq_config_write(struct idxd_wq *wq);
 
 /* Interrupt control bits */
 void idxd_unmask_error_interrupts(struct idxd_device *idxd)

@@ -215,14 +216,28 @@ int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
         return 0;
     }
 
+    /*
+     * Disable WQ does not drain address translations, if WQ attributes are
+     * changed before translations are drained, pending translations can
+     * be issued using updated WQ attibutes, resulting in invalid
+     * translations being cached in the device translation cache.
+     *
+     * To make sure pending translations are drained before WQ
+     * attributes are changed, we use a WQ Drain followed by WQ Reset and
+     * then restore the WQ configuration.
+     */
+    idxd_wq_drain(wq);
+
     operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
-    idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);
+    idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, &status);
 
     if (status != IDXD_CMDSTS_SUCCESS) {
-        dev_dbg(dev, "WQ disable failed: %#x\n", status);
+        dev_dbg(dev, "WQ reset failed: %#x\n", status);
         return -ENXIO;
     }
 
+    idxd_wq_config_write(wq);
+
     if (reset_config)
         idxd_wq_disable_cleanup(wq);
     clear_bit(wq->id, idxd->wq_enable_map);
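One detail worth spelling out from the hunk above is the operand expression, which packs a one-hot bit for (wq->id % 16) into the low 16 bits and the group index (wq->id / 16) into the high 16 bits. A worked restatement of that expression (stand-alone user-space sketch with a hypothetical WQ index, not driver code):

#include <stdio.h>

int main(void)
{
    unsigned int wq_id = 18;    /* hypothetical WQ index */
    unsigned int operand = (1u << (wq_id % 16)) | ((wq_id / 16) << 16);

    /* 18 % 16 = 2 -> bit 2 = 0x4; 18 / 16 = 1 -> 0x10000; prints 0x10004 */
    printf("operand = 0x%x\n", operand);
    return 0;
}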
@@ -1034,5 +1034,4 @@ static struct platform_driver k3_pdma_driver = {
 module_platform_driver(k3_pdma_driver);
 
 MODULE_DESCRIPTION("HiSilicon k3 DMA Driver");
-MODULE_ALIAS("platform:k3dma");
 MODULE_LICENSE("GPL v2");
@@ -554,7 +554,6 @@ static void mmp_tdma_issue_pending(struct dma_chan *chan)
 
 static void mmp_tdma_remove(struct platform_device *pdev)
 {
-    if (pdev->dev.of_node)
     of_dma_controller_free(pdev->dev.of_node);
 }
 

@@ -743,6 +742,5 @@ module_platform_driver(mmp_tdma_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("MMP Two-Channel DMA Driver");
-MODULE_ALIAS("platform:mmp-tdma");
 MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
 MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>");
@@ -1500,7 +1500,6 @@ static const struct platform_device_id nbpf_ids[] = {
 };
 MODULE_DEVICE_TABLE(platform, nbpf_ids);
 
-#ifdef CONFIG_PM
 static int nbpf_runtime_suspend(struct device *dev)
 {
     struct nbpf_device *nbpf = dev_get_drvdata(dev);

@@ -1513,17 +1512,16 @@ static int nbpf_runtime_resume(struct device *dev)
     struct nbpf_device *nbpf = dev_get_drvdata(dev);
     return clk_prepare_enable(nbpf->clk);
 }
-#endif
 
 static const struct dev_pm_ops nbpf_pm_ops = {
-    SET_RUNTIME_PM_OPS(nbpf_runtime_suspend, nbpf_runtime_resume, NULL)
+    RUNTIME_PM_OPS(nbpf_runtime_suspend, nbpf_runtime_resume, NULL)
 };
 
 static struct platform_driver nbpf_driver = {
     .driver = {
         .name = "dma-nbpf",
         .of_match_table = nbpf_match,
-        .pm = &nbpf_pm_ops,
+        .pm = pm_ptr(&nbpf_pm_ops),
     },
     .id_table = nbpf_ids,
     .probe = nbpf_probe,
@@ -1619,7 +1619,8 @@ gpi_peripheral_config(struct dma_chan *chan, struct dma_slave_config *config)
 }
 
 static int gpi_create_i2c_tre(struct gchan *chan, struct gpi_desc *desc,
-                  struct scatterlist *sgl, enum dma_transfer_direction direction)
+                  struct scatterlist *sgl, enum dma_transfer_direction direction,
+                  unsigned long flags)
 {
     struct gpi_i2c_config *i2c = chan->config;
     struct device *dev = chan->gpii->gpi_dev->dev;

@@ -1684,6 +1685,9 @@ static int gpi_create_i2c_tre(struct gchan *chan, struct gpi_desc *desc,
 
         tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
         tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT);
+
+        if (!(flags & DMA_PREP_INTERRUPT))
+            tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_BEI);
     }
 
     for (i = 0; i < tre_idx; i++)

@@ -1827,6 +1831,9 @@ gpi_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
         return NULL;
     }
 
+    if (!(flags & DMA_PREP_INTERRUPT) && (nr - nr_tre < 2))
+        return NULL;
+
     gpi_desc = kzalloc(sizeof(*gpi_desc), GFP_NOWAIT);
     if (!gpi_desc)
         return NULL;

@@ -1835,7 +1842,7 @@ gpi_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
     if (gchan->protocol == QCOM_GPI_SPI) {
         i = gpi_create_spi_tre(gchan, gpi_desc, sgl, direction);
     } else if (gchan->protocol == QCOM_GPI_I2C) {
-        i = gpi_create_i2c_tre(gchan, gpi_desc, sgl, direction);
+        i = gpi_create_i2c_tre(gchan, gpi_desc, sgl, direction, flags);
     } else {
         dev_err(dev, "invalid peripheral: %d\n", gchan->protocol);
         kfree(gpi_desc);
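The net effect of the GPI hunks above: a descriptor prepared without DMA_PREP_INTERRUPT now gets the Block Event Interrupt (BEI) bit set in its transfer ring element, so it completes without raising an event, and gpi_prep_slave_sg() rejects such a descriptor when the ring has almost no headroom left. A minimal client-side sketch of how a caller batches interrupts this way (hypothetical helper; the real user is the i2c-qcom-geni change later in this merge):

#include <linux/dmaengine.h>

/* Request a completion interrupt only for the last descriptor of a batch;
 * intermediate descriptors are submitted with DMA_CTRL_ACK alone, which the
 * GPI driver now turns into a silent (BEI) TRE. */
static struct dma_async_tx_descriptor *
prep_batched(struct dma_chan *chan, dma_addr_t buf, size_t len, bool last)
{
    unsigned long flags = DMA_CTRL_ACK;

    if (last)
        flags |= DMA_PREP_INTERRUPT;

    return dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV, flags);
}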
@@ -50,7 +50,7 @@ config RENESAS_USB_DMAC
 
 config RZ_DMAC
     tristate "Renesas RZ DMA Controller"
-    depends on ARCH_R7S72100 || ARCH_RZG2L || COMPILE_TEST
+    depends on ARCH_RENESAS || COMPILE_TEST
     select RENESAS_DMA
     select DMA_VIRTUAL_CHANNELS
     help
@@ -1728,19 +1728,12 @@ static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
  * Power management
  */
 
-#ifdef CONFIG_PM
-static int rcar_dmac_runtime_suspend(struct device *dev)
-{
-    return 0;
-}
-
 static int rcar_dmac_runtime_resume(struct device *dev)
 {
     struct rcar_dmac *dmac = dev_get_drvdata(dev);
 
     return rcar_dmac_init(dmac);
 }
-#endif
 
 static const struct dev_pm_ops rcar_dmac_pm = {
     /*

@@ -1748,10 +1741,9 @@ static const struct dev_pm_ops rcar_dmac_pm = {
      * - Wait for the current transfer to complete and stop the device,
      * - Resume transfers, if any.
      */
-    SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-                                  pm_runtime_force_resume)
-    SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
-                       NULL)
+    NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                              pm_runtime_force_resume)
+    RUNTIME_PM_OPS(NULL, rcar_dmac_runtime_resume, NULL)
 };
 
 /* -----------------------------------------------------------------------------

@@ -2036,7 +2028,7 @@ MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);
 
 static struct platform_driver rcar_dmac_driver = {
     .driver = {
-        .pm = &rcar_dmac_pm,
+        .pm = pm_ptr(&rcar_dmac_pm),
         .name = "rcar-dmac",
         .of_match_table = rcar_dmac_of_ids,
     },
@@ -670,7 +670,6 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
  * Power management
  */
 
-#ifdef CONFIG_PM
 static int usb_dmac_runtime_suspend(struct device *dev)
 {
     struct usb_dmac *dmac = dev_get_drvdata(dev);

@@ -691,13 +690,11 @@ static int usb_dmac_runtime_resume(struct device *dev)
 
     return usb_dmac_init(dmac);
 }
-#endif /* CONFIG_PM */
 
 static const struct dev_pm_ops usb_dmac_pm = {
-    SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-                                  pm_runtime_force_resume)
-    SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
-                       NULL)
+    NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                              pm_runtime_force_resume)
+    RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume, NULL)
 };
 
 /* -----------------------------------------------------------------------------

@@ -894,7 +891,7 @@ MODULE_DEVICE_TABLE(of, usb_dmac_of_ids);
 
 static struct platform_driver usb_dmac_driver = {
     .driver = {
-        .pm = &usb_dmac_pm,
+        .pm = pm_ptr(&usb_dmac_pm),
         .name = "usb-dmac",
         .of_match_table = usb_dmac_of_ids,
     },
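The nbpf, rcar-dmac and usb-dmac hunks above all apply the same conversion: the CONFIG_PM #ifdefs and the SET_*_PM_OPS() wrappers are replaced by NOIRQ_SYSTEM_SLEEP_PM_OPS()/RUNTIME_PM_OPS() plus pm_ptr(), so the callbacks are always compiled (catching breakage in every configuration) while the unused ops are dropped when power management is disabled. A condensed sketch of the resulting pattern (illustrative driver skeleton, not copied from any one of the files above):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int example_runtime_resume(struct device *dev)
{
    /* re-initialise the controller here */
    return 0;
}

static const struct dev_pm_ops example_pm_ops = {
    NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                              pm_runtime_force_resume)
    RUNTIME_PM_OPS(NULL, example_runtime_resume, NULL)
};

static struct platform_driver example_driver = {
    .driver = {
        .name = "example-dmac",             /* hypothetical driver name */
        .pm   = pm_ptr(&example_pm_ops),    /* NULL when CONFIG_PM is off */
    },
};
module_platform_driver(example_driver);

MODULE_DESCRIPTION("PM ops conversion sketch");
MODULE_LICENSE("GPL");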
@@ -1311,4 +1311,3 @@ MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("DMA driver for Spreadtrum");
 MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
 MODULE_AUTHOR("Eric Long <eric.long@spreadtrum.com>");
-MODULE_ALIAS("platform:sprd-dma");
@@ -866,4 +866,3 @@ MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
 MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>");
 MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
-MODULE_ALIAS("platform:" DRIVER_NAME);
|
@ -1230,7 +1230,6 @@ static struct platform_driver tegra_admac_driver = {
|
||||||
|
|
||||||
module_platform_driver(tegra_admac_driver);
|
module_platform_driver(tegra_admac_driver);
|
||||||
|
|
||||||
MODULE_ALIAS("platform:tegra210-adma");
|
|
||||||
MODULE_DESCRIPTION("NVIDIA Tegra ADMA driver");
|
MODULE_DESCRIPTION("NVIDIA Tegra ADMA driver");
|
||||||
MODULE_AUTHOR("Dara Ramesh <dramesh@nvidia.com>");
|
MODULE_AUTHOR("Dara Ramesh <dramesh@nvidia.com>");
|
||||||
MODULE_AUTHOR("Jon Hunter <jonathanh@nvidia.com>");
|
MODULE_AUTHOR("Jon Hunter <jonathanh@nvidia.com>");
|
||||||
|
|
|
||||||
|
|
@@ -77,6 +77,25 @@ enum geni_i2c_err_code {
 #define XFER_TIMEOUT    HZ
 #define RST_TIMEOUT    HZ
 
+#define QCOM_I2C_MIN_NUM_OF_MSGS_MULTI_DESC    2
+
+/**
+ * struct geni_i2c_gpi_multi_desc_xfer - Structure for multi transfer support
+ *
+ * @msg_idx_cnt: Current message index being processed in the transfer
+ * @unmap_msg_cnt: Number of messages that have been unmapped
+ * @irq_cnt: Number of transfer completion interrupts received
+ * @dma_buf: Array of virtual addresses for DMA-safe buffers
+ * @dma_addr: Array of DMA addresses corresponding to the buffers
+ */
+struct geni_i2c_gpi_multi_desc_xfer {
+    u32 msg_idx_cnt;
+    u32 unmap_msg_cnt;
+    u32 irq_cnt;
+    void **dma_buf;
+    dma_addr_t *dma_addr;
+};
+
 struct geni_i2c_dev {
     struct geni_se se;
     u32 tx_wm;

@@ -99,6 +118,9 @@ struct geni_i2c_dev {
     struct dma_chan *rx_c;
     bool gpi_mode;
     bool abort_done;
+    bool is_tx_multi_desc_xfer;
+    u32 num_msgs;
+    struct geni_i2c_gpi_multi_desc_xfer i2c_multi_desc_config;
 };
 
 struct geni_i2c_desc {
@@ -499,6 +521,7 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
 static void i2c_gpi_cb_result(void *cb, const struct dmaengine_result *result)
 {
     struct geni_i2c_dev *gi2c = cb;
+    struct geni_i2c_gpi_multi_desc_xfer *tx_multi_xfer;
 
     if (result->result != DMA_TRANS_NOERROR) {
         dev_err(gi2c->se.dev, "DMA txn failed:%d\n", result->result);

@@ -507,6 +530,11 @@ static void i2c_gpi_cb_result(void *cb, const struct dmaengine_result *result)
         dev_dbg(gi2c->se.dev, "DMA xfer has pending: %d\n", result->residue);
     }
 
+    if (gi2c->is_tx_multi_desc_xfer) {
+        tx_multi_xfer = &gi2c->i2c_multi_desc_config;
+        tx_multi_xfer->irq_cnt++;
+    }
+
     complete(&gi2c->done);
 }
@@ -525,7 +553,72 @@ static void geni_i2c_gpi_unmap(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
     }
 }
 
-static int geni_i2c_gpi(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
+/**
+ * geni_i2c_gpi_multi_desc_unmap() - Unmaps DMA buffers post multi message TX transfers
+ * @gi2c: I2C dev handle
+ * @msgs: Array of I2C messages
+ * @peripheral: Pointer to gpi_i2c_config
+ */
+static void geni_i2c_gpi_multi_desc_unmap(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[],
+                                          struct gpi_i2c_config *peripheral)
+{
+    u32 msg_xfer_cnt, wr_idx = 0;
+    struct geni_i2c_gpi_multi_desc_xfer *tx_multi_xfer = &gi2c->i2c_multi_desc_config;
+
+    msg_xfer_cnt = gi2c->err ? tx_multi_xfer->msg_idx_cnt : tx_multi_xfer->irq_cnt;
+
+    /* Unmap the processed DMA buffers based on the received interrupt count */
+    for (; tx_multi_xfer->unmap_msg_cnt < msg_xfer_cnt; tx_multi_xfer->unmap_msg_cnt++) {
+        wr_idx = tx_multi_xfer->unmap_msg_cnt;
+        geni_i2c_gpi_unmap(gi2c, &msgs[wr_idx],
+                           tx_multi_xfer->dma_buf[wr_idx],
+                           tx_multi_xfer->dma_addr[wr_idx],
+                           NULL, 0);
+
+        if (tx_multi_xfer->unmap_msg_cnt == gi2c->num_msgs - 1) {
+            kfree(tx_multi_xfer->dma_buf);
+            kfree(tx_multi_xfer->dma_addr);
+            break;
+        }
+    }
+}
+
+/**
+ * geni_i2c_gpi_multi_xfer_timeout_handler() - Handles multi message transfer timeout
+ * @dev: Pointer to the corresponding dev node
+ * @multi_xfer: Pointer to the geni_i2c_gpi_multi_desc_xfer
+ * @transfer_timeout_msecs: Timeout value in milliseconds
+ * @transfer_comp: Completion object of the transfer
+ *
+ * This function waits for the completion of each processed transfer messages
+ * based on the interrupts generated upon transfer completion.
+ *
+ * Return: On success returns 0, -ETIMEDOUT on timeout.
+ */
+static int geni_i2c_gpi_multi_xfer_timeout_handler(struct device *dev,
+                                                   struct geni_i2c_gpi_multi_desc_xfer *multi_xfer,
+                                                   u32 transfer_timeout_msecs,
+                                                   struct completion *transfer_comp)
+{
+    int i;
+    u32 time_left;
+
+    for (i = 0; i < multi_xfer->msg_idx_cnt - 1; i++) {
+        reinit_completion(transfer_comp);
+
+        if (multi_xfer->msg_idx_cnt != multi_xfer->irq_cnt) {
+            time_left = wait_for_completion_timeout(transfer_comp,
+                                                    transfer_timeout_msecs);
+            if (!time_left) {
+                dev_err(dev, "%s: Transfer timeout\n", __func__);
+                return -ETIMEDOUT;
+            }
+        }
+    }
+    return 0;
+}
+
+static int geni_i2c_gpi(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[],
             struct dma_slave_config *config, dma_addr_t *dma_addr_p,
             void **buf, unsigned int op, struct dma_chan *dma_chan)
 {
@@ -537,26 +630,45 @@ static int geni_i2c_gpi(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
     enum dma_transfer_direction dma_dirn;
     struct dma_async_tx_descriptor *desc;
     int ret;
+    struct geni_i2c_gpi_multi_desc_xfer *gi2c_gpi_xfer;
+    dma_cookie_t cookie;
+    u32 msg_idx;
 
     peripheral = config->peripheral_config;
+    gi2c_gpi_xfer = &gi2c->i2c_multi_desc_config;
+    msg_idx = gi2c_gpi_xfer->msg_idx_cnt;
 
-    dma_buf = i2c_get_dma_safe_msg_buf(msg, 1);
-    if (!dma_buf)
-        return -ENOMEM;
+    dma_buf = i2c_get_dma_safe_msg_buf(&msgs[msg_idx], 1);
+    if (!dma_buf) {
+        ret = -ENOMEM;
+        goto out;
+    }
 
     if (op == I2C_WRITE)
         map_dirn = DMA_TO_DEVICE;
     else
         map_dirn = DMA_FROM_DEVICE;
 
-    addr = dma_map_single(gi2c->se.dev->parent, dma_buf, msg->len, map_dirn);
+    addr = dma_map_single(gi2c->se.dev->parent, dma_buf,
+                          msgs[msg_idx].len, map_dirn);
     if (dma_mapping_error(gi2c->se.dev->parent, addr)) {
-        i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
-        return -ENOMEM;
+        i2c_put_dma_safe_msg_buf(dma_buf, &msgs[msg_idx], false);
+        ret = -ENOMEM;
+        goto out;
+    }
+
+    if (gi2c->is_tx_multi_desc_xfer) {
+        flags = DMA_CTRL_ACK;
+
+        /* BEI bit to be cleared for last TRE */
+        if (msg_idx == gi2c->num_msgs - 1)
+            flags |= DMA_PREP_INTERRUPT;
+    } else {
+        flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
     }
 
     /* set the length as message for rx txn */
-    peripheral->rx_len = msg->len;
+    peripheral->rx_len = msgs[msg_idx].len;
     peripheral->op = op;
 
     ret = dmaengine_slave_config(dma_chan, config);
@@ -567,14 +679,21 @@ static int geni_i2c_gpi(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
 
     peripheral->set_config = 0;
     peripheral->multi_msg = true;
-    flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
 
     if (op == I2C_WRITE)
         dma_dirn = DMA_MEM_TO_DEV;
     else
         dma_dirn = DMA_DEV_TO_MEM;
 
-    desc = dmaengine_prep_slave_single(dma_chan, addr, msg->len, dma_dirn, flags);
+    desc = dmaengine_prep_slave_single(dma_chan, addr, msgs[msg_idx].len,
+                                       dma_dirn, flags);
+    if (!desc && !(flags & DMA_PREP_INTERRUPT)) {
+        /* Retry with interrupt if not enough TREs */
+        flags |= DMA_PREP_INTERRUPT;
+        desc = dmaengine_prep_slave_single(dma_chan, addr, msgs[msg_idx].len,
+                                           dma_dirn, flags);
+    }
+
     if (!desc) {
         dev_err(gi2c->se.dev, "prep_slave_sg failed\n");
         ret = -EIO;
@@ -584,15 +703,48 @@ static int geni_i2c_gpi(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
     desc->callback_result = i2c_gpi_cb_result;
     desc->callback_param = gi2c;
 
-    dmaengine_submit(desc);
+    if (!((msgs[msg_idx].flags & I2C_M_RD) && op == I2C_WRITE))
+        gi2c_gpi_xfer->msg_idx_cnt++;
+
+    cookie = dmaengine_submit(desc);
+    if (dma_submit_error(cookie)) {
+        dev_err(gi2c->se.dev,
+            "%s: dmaengine_submit failed (%d)\n", __func__, cookie);
+        ret = -EINVAL;
+        goto err_config;
+    }
+
+    if (gi2c->is_tx_multi_desc_xfer) {
+        gi2c_gpi_xfer->dma_buf[msg_idx] = dma_buf;
+        gi2c_gpi_xfer->dma_addr[msg_idx] = addr;
+
+        dma_async_issue_pending(gi2c->tx_c);
+
+        if ((msg_idx == (gi2c->num_msgs - 1)) || flags & DMA_PREP_INTERRUPT) {
+            ret = geni_i2c_gpi_multi_xfer_timeout_handler(gi2c->se.dev, gi2c_gpi_xfer,
+                                                          XFER_TIMEOUT, &gi2c->done);
+            if (ret) {
+                dev_err(gi2c->se.dev,
+                    "I2C multi write msg transfer timeout: %d\n",
+                    ret);
+                gi2c->err = ret;
+                return ret;
+            }
+        }
+    } else {
+        /* Non multi descriptor message transfer */
     *buf = dma_buf;
     *dma_addr_p = addr;
+    }
     return 0;
 
 err_config:
-    dma_unmap_single(gi2c->se.dev->parent, addr, msg->len, map_dirn);
-    i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
+    dma_unmap_single(gi2c->se.dev->parent, addr,
+                     msgs[msg_idx].len, map_dirn);
+    i2c_put_dma_safe_msg_buf(dma_buf, &msgs[msg_idx], false);
+
+out:
+    gi2c->err = ret;
     return ret;
 }
@@ -604,6 +756,7 @@ static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], i
     unsigned long time_left;
     dma_addr_t tx_addr, rx_addr;
     void *tx_buf = NULL, *rx_buf = NULL;
+    struct geni_i2c_gpi_multi_desc_xfer *tx_multi_xfer;
     const struct geni_i2c_clk_fld *itr = gi2c->clk_fld;
 
     config.peripheral_config = &peripheral;
@@ -617,6 +770,41 @@ static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], i
     peripheral.set_config = 1;
     peripheral.multi_msg = false;
 
+    gi2c->num_msgs = num;
+    gi2c->is_tx_multi_desc_xfer = false;
+
+    tx_multi_xfer = &gi2c->i2c_multi_desc_config;
+    memset(tx_multi_xfer, 0, sizeof(struct geni_i2c_gpi_multi_desc_xfer));
+
+    /*
+     * If number of write messages are two and higher then
+     * configure hardware for multi descriptor transfers with BEI.
+     */
+    if (num >= QCOM_I2C_MIN_NUM_OF_MSGS_MULTI_DESC) {
+        gi2c->is_tx_multi_desc_xfer = true;
+        for (i = 0; i < num; i++) {
+            if (msgs[i].flags & I2C_M_RD) {
+                /*
+                 * Multi descriptor transfer with BEI
+                 * support is enabled for write transfers.
+                 * TODO: Add BEI optimization support for
+                 * read transfers later.
+                 */
+                gi2c->is_tx_multi_desc_xfer = false;
+                break;
+            }
+        }
+    }
+
+    if (gi2c->is_tx_multi_desc_xfer) {
+        tx_multi_xfer->dma_buf = kcalloc(num, sizeof(void *), GFP_KERNEL);
+        tx_multi_xfer->dma_addr = kcalloc(num, sizeof(dma_addr_t), GFP_KERNEL);
+        if (!tx_multi_xfer->dma_buf || !tx_multi_xfer->dma_addr) {
+            ret = -ENOMEM;
+            goto err;
+        }
+    }
+
     for (i = 0; i < num; i++) {
         gi2c->cur = &msgs[i];
         gi2c->err = 0;
@@ -627,14 +815,16 @@ static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], i
             peripheral.stretch = 1;
 
         peripheral.addr = msgs[i].addr;
+        if (i > 0 && (!(msgs[i].flags & I2C_M_RD)))
+            peripheral.multi_msg = false;
 
-        ret = geni_i2c_gpi(gi2c, &msgs[i], &config,
+        ret = geni_i2c_gpi(gi2c, msgs, &config,
                    &tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c);
         if (ret)
             goto err;
 
         if (msgs[i].flags & I2C_M_RD) {
-            ret = geni_i2c_gpi(gi2c, &msgs[i], &config,
+            ret = geni_i2c_gpi(gi2c, msgs, &config,
                        &rx_addr, &rx_buf, I2C_READ, gi2c->rx_c);
             if (ret)
                 goto err;
@@ -642,18 +832,24 @@ static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], i
             dma_async_issue_pending(gi2c->rx_c);
         }
 
+        if (!gi2c->is_tx_multi_desc_xfer) {
             dma_async_issue_pending(gi2c->tx_c);
 
             time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
-        if (!time_left)
+            if (!time_left) {
+                dev_err(gi2c->se.dev, "%s:I2C timeout\n", __func__);
                 gi2c->err = -ETIMEDOUT;
+            }
+        }
 
         if (gi2c->err) {
             ret = gi2c->err;
             goto err;
         }
 
+        if (!gi2c->is_tx_multi_desc_xfer)
             geni_i2c_gpi_unmap(gi2c, &msgs[i], tx_buf, tx_addr, rx_buf, rx_addr);
+        else if (tx_multi_xfer->unmap_msg_cnt != tx_multi_xfer->irq_cnt)
+            geni_i2c_gpi_multi_desc_unmap(gi2c, msgs, &peripheral);
     }
 
     return num;
@@ -662,7 +858,11 @@ err:
     dev_err(gi2c->se.dev, "GPI transfer failed: %d\n", ret);
     dmaengine_terminate_sync(gi2c->rx_c);
     dmaengine_terminate_sync(gi2c->tx_c);
+    if (gi2c->is_tx_multi_desc_xfer)
+        geni_i2c_gpi_multi_desc_unmap(gi2c, msgs, &peripheral);
+    else
         geni_i2c_gpi_unmap(gi2c, &msgs[i], tx_buf, tx_addr, rx_buf, rx_addr);
 
     return ret;
 }