dmaengine updates for v6.17

Core:
   - Managed API for dma channel request
 
  New support:
   - Sophgo CV18XX/SG200X dmamux driver
   - Qualcomm Milos GPI, sc8280xp GPI support
 
  Updates:
   - Conversion of brcm,iproc-sba and marvell,orion-xor binding
   - Unused code cleanup across drivers
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEE+vs47OPLdNbVcHzyfBQHDyUjg0cFAmiMsksACgkQfBQHDyUj
 g0dYdhAAi/ekDqoTUjGSaUAL2Bq2sMn4yxt45p6z1sBNIyCt6D1Exek6GOsNeyKt
 It99SDs/Iw+6HoEWbx46FmLYUV0/Dm/vRjMvw5feLT+XWHN4wTffQuA78w4YOzCV
 Sf1bzkt1VW5+NQQpKatGzjRwGYiFRUACASFhDZel31B1zGa8jYZLjwKdHenU3+HW
 5ItIoujGAXYawk0WKHhkwJxkN1FoNqwCiOcEmKtwekLAqHSw2C20uzBhxipeckf0
 ynW3IFRWH9LMRdYQOcLcE08Bx43xKcUM8WKvdwmein/mFY0KFV5ycGBsJGzDLi0N
 MLbgnq1FynqfVf0/eEe/+kOojoWjFahiEAn+ceVoj4QLpGFBgbmogeyxsnoUfhC9
 LsN87UMurdrLXf56/xMchb7YY8NOF6DguebZVUfEi6oZ3bDGtMj/MzEOS5btWG1V
 qEQVkeZr7OYOMbD+LWVwl40LO41kFfmbKd1SsCHa1ri1qWiMUwBEhrFnT+eYkFtA
 u2ep/LN5OGKFTkrBYQvVvnvTtVN69aNCKJDuOpQvMAY/rxGWy8VF3D0B0XQTul/8
 RTVmyIwGEk+nZ9d+TAMJYb/gU1mzgH7v3qAqXQa+zrF3JImwxOMU3ryFJoIRk17h
 /3YFJIL8q+HS/NLjEQW7VSIdVGOLiffbkjAWlcvddVSuINg3ESY=
 =j7pf
 -----END PGP SIGNATURE-----

Merge tag 'dmaengine-6.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
 "Core:

   - Managed API for dma channel request

  New support:

   - Sophgo CV18XX/SG200X dmamux driver

   - Qualcomm Milos GPI, sc8280xp GPI support

  Updates:

   - Conversion of brcm,iproc-sba and marvell,orion-xor binding

   - Unused code cleanup across drivers"

* tag 'dmaengine-6.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (23 commits)
  dt-bindings: dma: fsl-mxs-dma: allow interrupt-names for fsl,imx23-dma-apbx
  dmaengine: xdmac: make it selectable for ARCH_MICROCHIP
  dt-bindings: dma: Convert marvell,orion-xor to DT schema
  dt-bindings: dma: Convert brcm,iproc-sba to DT schema
  dmaengine: nbpfaxi: Add missing check after DMA map
  dmaengine: mv_xor: Fix missing check after DMA map and missing unmap
  dt-bindings: dma: qcom,gpi: document the Milos GPI DMA Engine
  dmaengine: idxd: Remove __packed from structures
  dmaengine: ti: Do not enable by default during compile testing
  dmaengine: sh: Do not enable SH_DMAE_BASE by default during compile testing
  dmaengine: idxd: Fix warning for deadcode.deadstore
  dmaengine: mmp: Fix again Wvoid-pointer-to-enum-cast warning
  dmaengine: fsl-qdma: Add missing fsl_qdma_format kerneldoc
  dmaengine: qcom: gpi: Drop unused gpi_write_reg_field()
  dmaengine: fsl-dpaa2-qdma: Drop unused mc_enc()
  dmaengine: dw-edma: Drop unused dchan2dev() and chan2dev()
  dmaengine: stm32: Don't use %pK through printk
  dmaengine: stm32-dma: configure next sg only if there are more than 2 sgs
  dmaengine: sun4i: Simplify error handling in probe()
  dt-bindings: dma: qcom,gpi: Document the sc8280xp GPI DMA engine
  ...
pull/1320/head
Linus Torvalds 2025-08-01 12:35:12 -07:00
commit 196dacf454
25 changed files with 578 additions and 184 deletions

View File

@ -1,29 +0,0 @@
* Broadcom SBA RAID engine
Required properties:
- compatible: Should be one of the following
"brcm,iproc-sba"
"brcm,iproc-sba-v2"
The "brcm,iproc-sba" has support for only 6 PQ coefficients
The "brcm,iproc-sba-v2" has support for only 30 PQ coefficients
- mboxes: List of phandle and mailbox channel specifiers
Example:
raid_mbox: mbox@67400000 {
...
#mbox-cells = <3>;
...
};
raid0 {
compatible = "brcm,iproc-sba-v2";
mboxes = <&raid_mbox 0 0x1 0xffff>,
<&raid_mbox 1 0x1 0xffff>,
<&raid_mbox 2 0x1 0xffff>,
<&raid_mbox 3 0x1 0xffff>,
<&raid_mbox 4 0x1 0xffff>,
<&raid_mbox 5 0x1 0xffff>,
<&raid_mbox 6 0x1 0xffff>,
<&raid_mbox 7 0x1 0xffff>;
};

View File

@ -0,0 +1,41 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/brcm,iproc-sba.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Broadcom SBA RAID engine
maintainers:
- Ray Jui <rjui@broadcom.com>
- Scott Branden <sbranden@broadcom.com>
properties:
compatible:
enum:
- brcm,iproc-sba
- brcm,iproc-sba-v2
mboxes:
minItems: 1
maxItems: 8
required:
- compatible
- mboxes
additionalProperties: false
examples:
- |
raid0 {
compatible = "brcm,iproc-sba-v2";
mboxes = <&raid_mbox 0 0x1 0xffff>,
<&raid_mbox 1 0x1 0xffff>,
<&raid_mbox 2 0x1 0xffff>,
<&raid_mbox 3 0x1 0xffff>,
<&raid_mbox 4 0x1 0xffff>,
<&raid_mbox 5 0x1 0xffff>,
<&raid_mbox 6 0x1 0xffff>,
<&raid_mbox 7 0x1 0xffff>;
};

View File

@ -23,6 +23,35 @@ allOf:
properties:
power-domains: false
- if:
properties:
compatible:
contains:
const: fsl,imx23-dma-apbx
then:
properties:
interrupt-names:
items:
- const: audio-adc
- const: audio-dac
- const: spdif-tx
- const: i2c
- const: saif0
- const: empty0
- const: auart0-rx
- const: auart0-tx
- const: auart1-rx
- const: auart1-tx
- const: saif1
- const: empty1
- const: empty2
- const: empty3
- const: empty4
- const: empty5
else:
properties:
interrupt-names: false
properties:
compatible:
oneOf:
@ -54,6 +83,10 @@ properties:
minItems: 4
maxItems: 16
interrupt-names:
minItems: 4
maxItems: 16
"#dma-cells":
const: 1

View File

@ -0,0 +1,84 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/marvell,orion-xor.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Marvell XOR engine
maintainers:
- Andrew Lunn <andrew@lunn.ch>
- Gregory Clement <gregory.clement@bootlin.com>
properties:
compatible:
oneOf:
- items:
- const: marvell,armada-380-xor
- const: marvell,orion-xor
- enum:
- marvell,armada-3700-xor
- marvell,orion-xor
reg:
items:
- description: Low registers for the XOR engine
- description: High registers for the XOR engine
clocks:
maxItems: 1
patternProperties:
"^(channel|xor)[0-9]+$":
description: XOR channel sub-node
type: object
additionalProperties: false
properties:
interrupts:
description: Interrupt specifier for the XOR channel
items:
- description: Interrupt for this channel
dmacap,memcpy:
type: boolean
deprecated: true
description:
Indicates that the XOR channel is capable of memcpy operations
dmacap,memset:
type: boolean
deprecated: true
description:
Indicates that the XOR channel is capable of memset operations
dmacap,xor:
type: boolean
deprecated: true
description:
Indicates that the XOR channel is capable of xor operations
required:
- interrupts
required:
- compatible
- reg
additionalProperties: false
examples:
- |
xor@d0060900 {
compatible = "marvell,orion-xor";
reg = <0xd0060900 0x100>,
<0xd0060b00 0x100>;
clocks = <&coreclk 0>;
xor00 {
interrupts = <51>;
};
xor01 {
interrupts = <52>;
};
};

View File

@ -1,40 +0,0 @@
* Marvell XOR engines
Required properties:
- compatible: Should be one of the following:
- "marvell,orion-xor"
- "marvell,armada-380-xor"
- "marvell,armada-3700-xor".
- reg: Should contain registers location and length (two sets)
the first set is the low registers, the second set the high
registers for the XOR engine.
- clocks: pointer to the reference clock
The DT node must also contains sub-nodes for each XOR channel that the
XOR engine has. Those sub-nodes have the following required
properties:
- interrupts: interrupt of the XOR channel
The sub-nodes used to contain one or several of the following
properties, but they are now deprecated:
- dmacap,memcpy to indicate that the XOR channel is capable of memcpy operations
- dmacap,memset to indicate that the XOR channel is capable of memset operations
- dmacap,xor to indicate that the XOR channel is capable of xor operations
- dmacap,interrupt to indicate that the XOR channel is capable of
generating interrupts
Example:
xor@d0060900 {
compatible = "marvell,orion-xor";
reg = <0xd0060900 0x100
0xd0060b00 0x100>;
clocks = <&coreclk 0>;
xor00 {
interrupts = <51>;
};
xor01 {
interrupts = <52>;
};
};

View File

@ -24,12 +24,14 @@ properties:
- qcom,sm6350-gpi-dma
- items:
- enum:
- qcom,milos-gpi-dma
- qcom,qcm2290-gpi-dma
- qcom,qcs8300-gpi-dma
- qcom,qdu1000-gpi-dma
- qcom,sa8775p-gpi-dma
- qcom,sar2130p-gpi-dma
- qcom,sc7280-gpi-dma
- qcom,sc8280xp-gpi-dma
- qcom,sdx75-gpi-dma
- qcom,sm6115-gpi-dma
- qcom,sm6375-gpi-dma

View File

@ -0,0 +1,51 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/sophgo,cv1800b-dmamux.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Sophgo CV1800/SG200 Series DMA multiplexer
maintainers:
- Inochi Amaoto <inochiama@gmail.com>
description:
The DMA multiplexer of CV1800 is a subdevice of the system
controller. It supports mapping 8 channels, but each channel
can be mapped only once.
allOf:
- $ref: dma-router.yaml#
properties:
compatible:
const: sophgo,cv1800b-dmamux
reg:
items:
- description: DMA channel remapping register
- description: DMA channel interrupt mapping register
'#dma-cells':
const: 2
description:
The first cell is the device id. The second one is the cpu id.
dma-masters:
maxItems: 1
required:
- reg
- '#dma-cells'
- dma-masters
additionalProperties: false
examples:
- |
dma-router@154 {
compatible = "sophgo,cv1800b-dmamux";
reg = <0x154 0x8>, <0x298 0x4>;
#dma-cells = <2>;
dma-masters = <&dmac>;
};

View File

@ -89,7 +89,6 @@ config APPLE_ADMAC
tristate "Apple ADMAC support"
depends on ARCH_APPLE || COMPILE_TEST
select DMA_ENGINE
default ARCH_APPLE
help
Enable support for Audio DMA Controller found on Apple Silicon SoCs.
@ -111,7 +110,7 @@ config AT_HDMAC
config AT_XDMAC
tristate "Atmel XDMA support"
depends on ARCH_AT91
depends on ARCH_MICROCHIP
select DMA_ENGINE
help
Support the Atmel XDMA controller.
@ -572,6 +571,15 @@ config PLX_DMA
These are exposed via extra functions on the switch's
upstream port. Each function exposes one DMA channel.
config SOPHGO_CV1800B_DMAMUX
tristate "Sophgo CV1800/SG2000 series SoC DMA multiplexer support"
depends on MFD_SYSCON
depends on ARCH_SOPHGO || COMPILE_TEST
help
Support for the DMA multiplexer on Sophgo CV1800/SG2000
series SoCs.
Say Y here if your board has this SoC.
config STE_DMA40
bool "ST-Ericsson DMA40 support"
depends on ARCH_U8500

View File

@ -71,6 +71,7 @@ obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
obj-$(CONFIG_RENESAS_DMA) += sh/
obj-$(CONFIG_SF_PDMA) += sf-pdma/
obj-$(CONFIG_SOPHGO_CV1800B_DMAMUX) += cv1800b-dmamux.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_SPRD_DMA) += sprd-dma.o
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o

View File

@ -0,0 +1,259 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
*/
#include <linux/bitops.h>
#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/llist.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
#include <linux/mfd/syscon.h>
/* Mux control registers inside the parent system controller. */
#define REG_DMA_CHANNEL_REMAP0		0x154
#define REG_DMA_CHANNEL_REMAP1		0x158
#define REG_DMA_INT_MUX			0x298

/* #dma-cells of this router: <device id, cpu id>. */
#define DMAMUX_NCELLS			2
/* Highest device (request) id accepted from DT (see route allocator). */
#define MAX_DMA_MAPPING_ID		42
/* Highest cpu id accepted from DT. */
#define MAX_DMA_CPU_ID			2
/* Highest DMA channel id handled by the mux (channels 0..7). */
#define MAX_DMA_CH_ID			7

#define DMAMUX_INTMUX_REGISTER_LEN	4
#define DMAMUX_NR_CH_PER_REGISTER	4
/* Each channel owns one byte lane in the remap registers. */
#define DMAMUX_BIT_PER_CH		8
/*
 * Device-id field within a channel's byte lane.  Renamed from
 * "DMAMUX_CH_MASk" (stray lowercase 'k'), which was too easy to
 * confuse with the function-like DMAMUX_CH_MASK() macro below.
 */
#define DMAMUX_CH_FIELD_MASK		GENMASK(5, 0)
#define DMAMUX_INT_BIT_PER_CPU		10
/* Remap writes only take effect while this bit is set. */
#define DMAMUX_CH_UPDATE_BIT		BIT(31)

/* Remap register index / byte-lane offset for a channel id. */
#define DMAMUX_CH_REGPOS(chid) \
	((chid) / DMAMUX_NR_CH_PER_REGISTER)
#define DMAMUX_CH_REGOFF(chid) \
	((chid) % DMAMUX_NR_CH_PER_REGISTER)
#define DMAMUX_CH_REG(chid) \
	((DMAMUX_CH_REGPOS(chid) * sizeof(u32)) + \
	 REG_DMA_CHANNEL_REMAP0)
/* Value that programs 'val' into the channel's byte lane. */
#define DMAMUX_CH_SET(chid, val) \
	(((val) << (DMAMUX_CH_REGOFF(chid) * DMAMUX_BIT_PER_CH)) | \
	 DMAMUX_CH_UPDATE_BIT)
/* Full read-modify-write mask for a channel (update bit included). */
#define DMAMUX_CH_MASK(chid) \
	DMAMUX_CH_SET(chid, DMAMUX_CH_FIELD_MASK)

/* Interrupt-mux bit of a channel within a given cpu's bit field. */
#define DMAMUX_INT_BIT(chid, cpuid) \
	BIT((cpuid) * DMAMUX_INT_BIT_PER_CPU + (chid))
/* Per-cpu interrupt enable bit (bit 8 of that cpu's field). */
#define DMAMUX_INTEN_BIT(cpuid) \
	DMAMUX_INT_BIT(8, cpuid)
#define DMAMUX_INT_CH_BIT(chid, cpuid) \
	(DMAMUX_INT_BIT(chid, cpuid) | DMAMUX_INTEN_BIT(cpuid))
/* The channel's interrupt bit across all three possible cpus. */
#define DMAMUX_INT_MASK(chid) \
	(DMAMUX_INT_BIT(chid, 0) | \
	 DMAMUX_INT_BIT(chid, 1) | \
	 DMAMUX_INT_BIT(chid, 2))
#define DMAMUX_INT_CH_MASK(chid, cpuid) \
	(DMAMUX_INT_MASK(chid) | DMAMUX_INTEN_BIT(cpuid))
/* Per-instance driver state, shared by all channels of one mux. */
struct cv1800_dmamux_data {
	struct dma_router dmarouter;
	struct regmap *regmap;		/* parent syscon regmap */
	spinlock_t lock;		/* protects the lists, bitmap and mux registers */
	struct llist_head free_maps;	/* channels not yet handed out */
	struct llist_head reserve_maps;	/* channels currently routed */
	/*
	 * One bit per mapped device id.  Ids run 0..MAX_DMA_MAPPING_ID
	 * inclusive (the allocator rejects only devid > MAX_DMA_MAPPING_ID),
	 * so the bitmap needs MAX_DMA_MAPPING_ID + 1 bits; the previous
	 * size of MAX_DMA_MAPPING_ID was off by one and only worked
	 * because DECLARE_BITMAP rounds up to whole longs.
	 */
	DECLARE_BITMAP(mapped_peripherals, MAX_DMA_MAPPING_ID + 1);
};

/* Book-keeping for one routable DMA channel. */
struct cv1800_dmamux_map {
	struct llist_node node;
	unsigned int channel;		/* DMA channel id, 0..MAX_DMA_CH_ID */
	unsigned int peripheral;	/* device/request id mapped onto it */
	unsigned int cpu;		/* cpu whose interrupt mux is used */
};
/*
 * of_dma route_free callback: park the channel mapping again and drop
 * the channel's interrupt bit while keeping the cpu's enable bit set.
 */
static void cv1800_dmamux_free(struct device *dev, void *route_data)
{
	struct cv1800_dmamux_data *mux = dev_get_drvdata(dev);
	struct cv1800_dmamux_map *m = route_data;
	unsigned int chid = m->channel;
	unsigned long flags;

	spin_lock_irqsave(&mux->lock, flags);

	/* Clear the device-id byte lane (value = update bit only). */
	regmap_update_bits(mux->regmap, DMAMUX_CH_REG(chid),
			   DMAMUX_CH_MASK(chid), DMAMUX_CH_UPDATE_BIT);
	/* Clear this channel's int-mux bit; leave the cpu enable set. */
	regmap_update_bits(mux->regmap, REG_DMA_INT_MUX,
			   DMAMUX_INT_CH_MASK(chid, m->cpu),
			   DMAMUX_INTEN_BIT(m->cpu));

	dev_dbg(dev, "free channel %u for req %u (cpu %u)\n",
		chid, m->peripheral, m->cpu);

	spin_unlock_irqrestore(&mux->lock, flags);
}
/*
 * of_dma route_allocate callback: translate a 2-cell consumer spec
 * <device id, cpu id> into a concrete channel of the DMA master.
 *
 * A device id is mapped at most once; a later request for the same
 * device id succeeds only if it names the same cpu, otherwise -EINVAL.
 * Returns the map handle later passed to cv1800_dmamux_free(), or an
 * ERR_PTR on failure.
 */
static void *cv1800_dmamux_route_allocate(struct of_phandle_args *dma_spec,
					  struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct cv1800_dmamux_data *dmamux = platform_get_drvdata(pdev);
	struct cv1800_dmamux_map *map;
	struct llist_node *node;
	unsigned long flags;
	unsigned int chid, devid, cpuid;
	int ret;

	/* The consumer must use exactly <devid cpuid>. */
	if (dma_spec->args_count != DMAMUX_NCELLS) {
		dev_err(&pdev->dev, "invalid number of dma mux args\n");
		return ERR_PTR(-EINVAL);
	}

	devid = dma_spec->args[0];
	cpuid = dma_spec->args[1];
	/* The master behind the router takes a single cell: the channel. */
	dma_spec->args_count = 1;

	if (devid > MAX_DMA_MAPPING_ID) {
		dev_err(&pdev->dev, "invalid device id: %u\n", devid);
		return ERR_PTR(-EINVAL);
	}

	if (cpuid > MAX_DMA_CPU_ID) {
		dev_err(&pdev->dev, "invalid cpu id: %u\n", cpuid);
		return ERR_PTR(-EINVAL);
	}

	/* Redirect the spec at the real DMA controller node. */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "can't get dma master\n");
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irqsave(&dmamux->lock, flags);

	if (test_bit(devid, dmamux->mapped_peripherals)) {
		/* Already mapped: reuse only for an exact (devid, cpu) match. */
		llist_for_each_entry(map, dmamux->reserve_maps.first, node) {
			if (map->peripheral == devid && map->cpu == cpuid)
				goto found;
		}

		ret = -EINVAL;
		goto failed;
	} else {
		/* New device id: take a channel off the free list. */
		node = llist_del_first(&dmamux->free_maps);
		if (!node) {
			ret = -ENODEV;
			goto failed;
		}

		map = llist_entry(node, struct cv1800_dmamux_map, node);
		llist_add(&map->node, &dmamux->reserve_maps);
		set_bit(devid, dmamux->mapped_peripherals);
	}

found:
	chid = map->channel;
	map->peripheral = devid;
	map->cpu = cpuid;

	/* Program the device id into the channel's remap byte lane... */
	regmap_set_bits(dmamux->regmap,
			DMAMUX_CH_REG(chid),
			DMAMUX_CH_SET(chid, devid));

	/* ...and route the channel's interrupt to the requested cpu. */
	regmap_update_bits(dmamux->regmap, REG_DMA_INT_MUX,
			   DMAMUX_INT_CH_MASK(chid, cpuid),
			   DMAMUX_INT_CH_BIT(chid, cpuid));

	spin_unlock_irqrestore(&dmamux->lock, flags);

	/* Hand the resolved channel id to the DMA master. */
	dma_spec->args[0] = chid;

	dev_dbg(&pdev->dev, "register channel %u for req %u (cpu %u)\n",
		chid, devid, cpuid);

	return map;

failed:
	spin_unlock_irqrestore(&dmamux->lock, flags);
	of_node_put(dma_spec->np);
	dev_err(&pdev->dev, "errno %d\n", ret);
	return ERR_PTR(ret);
}
/*
 * Probe: build the pool of channel maps and register this device as a
 * DMA router on top of the parent system controller's regmap.
 */
static int cv1800_dmamux_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;
	struct cv1800_dmamux_data *mux;
	struct regmap *regmap;
	unsigned int chid;

	/* The mux registers live in the parent syscon device. */
	if (!parent)
		return -ENODEV;

	regmap = device_node_to_regmap(parent->of_node);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return -ENOMEM;

	mux->regmap = regmap;
	spin_lock_init(&mux->lock);
	init_llist_head(&mux->free_maps);
	init_llist_head(&mux->reserve_maps);

	for (chid = 0; chid <= MAX_DMA_CH_ID; chid++) {
		struct cv1800_dmamux_map *map;

		map = devm_kmalloc(dev, sizeof(*map), GFP_KERNEL);
		if (!map) {
			/* Losing some channels is tolerable; keep going. */
			dev_warn(dev, "can not allocate channel %u\n", chid);
			continue;
		}

		init_llist_node(&map->node);
		map->channel = chid;
		llist_add(&map->node, &mux->free_maps);
	}

	/* Without a single channel the router would be useless. */
	if (llist_empty(&mux->free_maps))
		return -ENOMEM;

	mux->dmarouter.dev = dev;
	mux->dmarouter.route_free = cv1800_dmamux_free;

	platform_set_drvdata(pdev, mux);

	return of_dma_router_register(dev->of_node,
				      cv1800_dmamux_route_allocate,
				      &mux->dmarouter);
}
/* Unbind: drop the of_dma router registration (devm frees the rest). */
static void cv1800_dmamux_remove(struct platform_device *pdev)
{
	of_dma_controller_free(pdev->dev.of_node);
}
/* Devicetree match table. */
static const struct of_device_id cv1800_dmamux_ids[] = {
	{ .compatible = "sophgo,cv1800b-dmamux", },
	{ }
};
MODULE_DEVICE_TABLE(of, cv1800_dmamux_ids);

static struct platform_driver cv1800_dmamux_driver = {
	.probe = cv1800_dmamux_probe,
	.remove = cv1800_dmamux_remove,
	.driver = {
		.name = "cv1800-dmamux",
		.of_match_table = cv1800_dmamux_ids,
	},
};
module_platform_driver(cv1800_dmamux_driver);

MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
MODULE_DESCRIPTION("Sophgo CV1800/SG2000 Series SoC DMAMUX driver");
MODULE_LICENSE("GPL");

View File

@ -23,18 +23,6 @@
#include "../dmaengine.h"
#include "../virt-dma.h"
static inline
struct device *dchan2dev(struct dma_chan *dchan)
{
return &dchan->dev->device;
}
static inline
struct device *chan2dev(struct dw_edma_chan *chan)
{
return &chan->vc.chan.dev->device;
}
static inline
struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
{

View File

@ -48,11 +48,6 @@ struct dpdmai_cmd_destroy {
__le32 dpdmai_id;
} __packed;
static inline u64 mc_enc(int lsoffset, int width, u64 val)
{
return (val & MAKE_UMASK64(width)) << lsoffset;
}
/**
* dpdmai_open() - Open a control session for the specified object
* @mc_io: Pointer to MC portal's I/O object

View File

@ -148,6 +148,9 @@
* @__reserved1: Reserved field.
* @cfg8b_w1: Compound descriptor command queue origin produced
* by qDMA and dynamic debug field.
* @__reserved2: Reserved field.
* @cmd: Command for QDMA (see FSL_QDMA_CMD_RWTTYPE and
* others).
* @data: Pointer to the memory 40-bit address, describes DMA
* source information and DMA destination information.
*/

View File

@ -1036,7 +1036,6 @@ static void idxd_reset_prepare(struct pci_dev *pdev)
const char *idxd_name;
int rc;
dev = &idxd->pdev->dev;
idxd_name = dev_name(idxd_confdev(idxd));
struct idxd_saved_states *idxd_saved __free(kfree) =

View File

@ -45,7 +45,7 @@ union gen_cap_reg {
u64 rsvd3:32;
};
u64 bits;
} __packed;
};
#define IDXD_GENCAP_OFFSET 0x10
union wq_cap_reg {
@ -65,7 +65,7 @@ union wq_cap_reg {
u64 rsvd4:8;
};
u64 bits;
} __packed;
};
#define IDXD_WQCAP_OFFSET 0x20
#define IDXD_WQCFG_MIN 5
@ -79,7 +79,7 @@ union group_cap_reg {
u64 rsvd:45;
};
u64 bits;
} __packed;
};
#define IDXD_GRPCAP_OFFSET 0x30
union engine_cap_reg {
@ -88,7 +88,7 @@ union engine_cap_reg {
u64 rsvd:56;
};
u64 bits;
} __packed;
};
#define IDXD_ENGCAP_OFFSET 0x38
@ -114,7 +114,7 @@ union offsets_reg {
u64 rsvd:48;
};
u64 bits[2];
} __packed;
};
#define IDXD_TABLE_MULT 0x100
@ -128,7 +128,7 @@ union gencfg_reg {
u32 rsvd2:18;
};
u32 bits;
} __packed;
};
#define IDXD_GENCTRL_OFFSET 0x88
union genctrl_reg {
@ -139,7 +139,7 @@ union genctrl_reg {
u32 rsvd:29;
};
u32 bits;
} __packed;
};
#define IDXD_GENSTATS_OFFSET 0x90
union gensts_reg {
@ -149,7 +149,7 @@ union gensts_reg {
u32 rsvd:28;
};
u32 bits;
} __packed;
};
enum idxd_device_status_state {
IDXD_DEVICE_STATE_DISABLED = 0,
@ -183,7 +183,7 @@ union idxd_command_reg {
u32 int_req:1;
};
u32 bits;
} __packed;
};
enum idxd_cmd {
IDXD_CMD_ENABLE_DEVICE = 1,
@ -213,7 +213,7 @@ union cmdsts_reg {
u8 active:1;
};
u32 bits;
} __packed;
};
#define IDXD_CMDSTS_ACTIVE 0x80000000
#define IDXD_CMDSTS_ERR_MASK 0xff
#define IDXD_CMDSTS_RES_SHIFT 8
@ -284,7 +284,7 @@ union sw_err_reg {
u64 rsvd5;
};
u64 bits[4];
} __packed;
};
union iaa_cap_reg {
struct {
@ -303,7 +303,7 @@ union iaa_cap_reg {
u64 rsvd:52;
};
u64 bits;
} __packed;
};
#define IDXD_IAACAP_OFFSET 0x180
@ -320,7 +320,7 @@ union evlcfg_reg {
u64 rsvd2:28;
};
u64 bits[2];
} __packed;
};
#define IDXD_EVL_SIZE_MIN 0x0040
#define IDXD_EVL_SIZE_MAX 0xffff
@ -334,7 +334,7 @@ union msix_perm {
u32 pasid:20;
};
u32 bits;
} __packed;
};
union group_flags {
struct {
@ -352,13 +352,13 @@ union group_flags {
u64 rsvd5:26;
};
u64 bits;
} __packed;
};
struct grpcfg {
u64 wqs[4];
u64 engines;
union group_flags flags;
} __packed;
};
union wqcfg {
struct {
@ -410,7 +410,7 @@ union wqcfg {
u64 op_config[4];
};
u32 bits[16];
} __packed;
};
#define WQCFG_PASID_IDX 2
#define WQCFG_PRIVL_IDX 2
@ -474,7 +474,7 @@ union idxd_perfcap {
u64 rsvd3:8;
};
u64 bits;
} __packed;
};
#define IDXD_EVNTCAP_OFFSET 0x80
union idxd_evntcap {
@ -483,7 +483,7 @@ union idxd_evntcap {
u64 rsvd:36;
};
u64 bits;
} __packed;
};
struct idxd_event {
union {
@ -493,7 +493,7 @@ struct idxd_event {
};
u32 val;
};
} __packed;
};
#define IDXD_CNTRCAP_OFFSET 0x800
struct idxd_cntrcap {
@ -506,7 +506,7 @@ struct idxd_cntrcap {
u32 val;
};
struct idxd_event events[];
} __packed;
};
#define IDXD_PERFRST_OFFSET 0x10
union idxd_perfrst {
@ -516,7 +516,7 @@ union idxd_perfrst {
u32 rsvd:30;
};
u32 val;
} __packed;
};
#define IDXD_OVFSTATUS_OFFSET 0x30
#define IDXD_PERFFRZ_OFFSET 0x20
@ -533,7 +533,7 @@ union idxd_cntrcfg {
u64 rsvd3:4;
};
u64 val;
} __packed;
};
#define IDXD_FLTCFG_OFFSET 0x300
@ -543,7 +543,7 @@ union idxd_cntrdata {
u64 event_count_value;
};
u64 val;
} __packed;
};
union event_cfg {
struct {
@ -551,7 +551,7 @@ union event_cfg {
u64 event_enc:28;
};
u64 val;
} __packed;
};
union filter_cfg {
struct {
@ -562,7 +562,7 @@ union filter_cfg {
u64 eng:8;
};
u64 val;
} __packed;
};
#define IDXD_EVLSTATUS_OFFSET 0xf0
@ -580,7 +580,7 @@ union evl_status_reg {
u32 bits_upper32;
};
u64 bits;
} __packed;
};
#define IDXD_MAX_BATCH_IDENT 256
@ -620,17 +620,17 @@ struct __evl_entry {
};
u64 fault_addr;
u64 rsvd5;
} __packed;
};
struct dsa_evl_entry {
struct __evl_entry e;
struct dsa_completion_record cr;
} __packed;
};
struct iax_evl_entry {
struct __evl_entry e;
u64 rsvd[4];
struct iax_completion_record cr;
} __packed;
};
#endif

View File

@ -641,7 +641,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
int chan_num = TDMA_CHANNEL_NUM;
struct gen_pool *pool = NULL;
type = (enum mmp_tdma_type)device_get_match_data(&pdev->dev);
type = (kernel_ulong_t)device_get_match_data(&pdev->dev);
/* always have couple channels */
tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);

View File

@ -1061,8 +1061,16 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
*/
mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_src_addr))
return ERR_PTR(-ENOMEM);
mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_dst_addr)) {
ret = -ENOMEM;
goto err_unmap_src;
}
/* allocate coherent memory for hardware descriptors
* note: writecombine gives slightly better performance, but
@ -1071,8 +1079,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
mv_chan->dma_desc_pool_virt =
dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
GFP_KERNEL);
if (!mv_chan->dma_desc_pool_virt)
return ERR_PTR(-ENOMEM);
if (!mv_chan->dma_desc_pool_virt) {
ret = -ENOMEM;
goto err_unmap_dst;
}
/* discover transaction capabilities from the platform data */
dma_dev->cap_mask = cap_mask;
@ -1155,6 +1165,13 @@ err_free_irq:
err_free_dma:
dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
err_unmap_dst:
dma_unmap_single(dma_dev->dev, mv_chan->dummy_dst_addr,
MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
err_unmap_src:
dma_unmap_single(dma_dev->dev, mv_chan->dummy_src_addr,
MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
return ERR_PTR(ret);
}

View File

@ -711,6 +711,9 @@ static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
list_add_tail(&ldesc->node, &lhead);
ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev,
hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE);
if (dma_mapping_error(dchan->device->dev,
ldesc->hwdesc_dma_addr))
goto unmap_error;
dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__,
hwdesc, &ldesc->hwdesc_dma_addr);
@ -737,6 +740,16 @@ static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
spin_unlock_irq(&chan->lock);
return ARRAY_SIZE(dpage->desc);
unmap_error:
while (i--) {
ldesc--; hwdesc--;
dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr,
sizeof(hwdesc), DMA_TO_DEVICE);
}
return -ENOMEM;
}
static void nbpf_desc_put(struct nbpf_desc *desc)

View File

@ -569,17 +569,6 @@ static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
writel_relaxed(val, addr);
}
/* gpi_write_reg_field - write to specific bit field */
static inline void gpi_write_reg_field(struct gpii *gpii, void __iomem *addr,
u32 mask, u32 shift, u32 val)
{
u32 tmp = gpi_read_reg(gpii, addr);
tmp &= ~mask;
val = tmp | ((val << shift) & mask);
gpi_write_reg(gpii, addr, val);
}
static __always_inline void
gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val)
{

View File

@ -16,7 +16,7 @@ config SH_DMAE_BASE
depends on SUPERH || COMPILE_TEST
depends on !SUPERH || SH_DMA
depends on !SH_DMA_API
default y
default SUPERH || SH_DMA
select RENESAS_DMA
help
Enable support for the Renesas SuperH DMA controllers.

View File

@ -613,7 +613,7 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
reg->dma_scr |= STM32_DMA_SCR_EN;
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}
static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
@ -676,7 +676,7 @@ static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan)
chan->status = DMA_PAUSED;
dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
dev_dbg(chan2dev(chan), "vchan %p: paused\n", &chan->vchan);
}
static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan)
@ -728,7 +728,7 @@ static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan)
dma_scr |= STM32_DMA_SCR_EN;
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
dev_dbg(chan2dev(chan), "vchan %pK: reconfigured after pause/resume\n", &chan->vchan);
dev_dbg(chan2dev(chan), "vchan %p: reconfigured after pause/resume\n", &chan->vchan);
}
static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
@ -744,7 +744,7 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
/* cyclic while CIRC/DBM disable => post resume reconfiguration needed */
if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM)))
stm32_dma_post_resume_reconfigure(chan);
else if (scr & STM32_DMA_SCR_DBM)
else if (scr & STM32_DMA_SCR_DBM && chan->desc->num_sgs > 2)
stm32_dma_configure_next_sg(chan);
} else {
chan->busy = false;
@ -820,7 +820,7 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
spin_lock_irqsave(&chan->vchan.lock, flags);
if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
stm32_dma_start_transfer(chan);
}
@ -922,7 +922,7 @@ static int stm32_dma_resume(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);
dev_dbg(chan2dev(chan), "vchan %p: resumed\n", &chan->vchan);
return 0;
}

View File

@ -801,7 +801,7 @@ static void stm32_dma3_chan_start(struct stm32_dma3_chan *chan)
chan->dma_status = DMA_IN_PROGRESS;
dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}
static int stm32_dma3_chan_suspend(struct stm32_dma3_chan *chan, bool susp)
@ -1452,7 +1452,7 @@ static int stm32_dma3_pause(struct dma_chan *c)
chan->dma_status = DMA_PAUSED;
dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
dev_dbg(chan2dev(chan), "vchan %p: paused\n", &chan->vchan);
return 0;
}
@ -1465,7 +1465,7 @@ static int stm32_dma3_resume(struct dma_chan *c)
chan->dma_status = DMA_IN_PROGRESS;
dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);
dev_dbg(chan2dev(chan), "vchan %p: resumed\n", &chan->vchan);
return 0;
}
@ -1490,7 +1490,7 @@ static int stm32_dma3_terminate_all(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
vchan_dma_desc_free_list(&chan->vchan, &head);
dev_dbg(chan2dev(chan), "vchan %pK: terminated\n", &chan->vchan);
dev_dbg(chan2dev(chan), "vchan %p: terminated\n", &chan->vchan);
return 0;
}
@ -1543,7 +1543,7 @@ static void stm32_dma3_issue_pending(struct dma_chan *c)
spin_lock_irqsave(&chan->vchan.lock, flags);
if (vchan_issue_pending(&chan->vchan) && !chan->swdesc) {
dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
stm32_dma3_chan_start(chan);
}

View File

@ -1187,7 +1187,7 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
chan->busy = true;
dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}
static void stm32_mdma_issue_pending(struct dma_chan *c)
@ -1200,7 +1200,7 @@ static void stm32_mdma_issue_pending(struct dma_chan *c)
if (!vchan_issue_pending(&chan->vchan))
goto end;
dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
if (!chan->desc && !chan->busy)
stm32_mdma_start_transfer(chan);
@ -1220,7 +1220,7 @@ static int stm32_mdma_pause(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
if (!ret)
dev_dbg(chan2dev(chan), "vchan %pK: pause\n", &chan->vchan);
dev_dbg(chan2dev(chan), "vchan %p: pause\n", &chan->vchan);
return ret;
}
@ -1261,7 +1261,7 @@ static int stm32_mdma_resume(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
dev_dbg(chan2dev(chan), "vchan %pK: resume\n", &chan->vchan);
dev_dbg(chan2dev(chan), "vchan %p: resume\n", &chan->vchan);
return 0;
}

View File

@ -1249,11 +1249,10 @@ static int sun4i_dma_probe(struct platform_device *pdev)
if (priv->irq < 0)
return priv->irq;
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "No clock specified\n");
return PTR_ERR(priv->clk);
}
priv->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(priv->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk),
"Couldn't start the clock\n");
if (priv->cfg->has_reset) {
priv->rst = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL);
@ -1328,12 +1327,6 @@ static int sun4i_dma_probe(struct platform_device *pdev)
vchan_init(&vchan->vc, &priv->slave);
}
ret = clk_prepare_enable(priv->clk);
if (ret) {
dev_err(&pdev->dev, "Couldn't enable the clock\n");
return ret;
}
/*
* Make sure the IRQs are all disabled and accounted for. The bootloader
* likes to leave these dirty
@ -1343,33 +1336,23 @@ static int sun4i_dma_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
0, dev_name(&pdev->dev), priv);
if (ret) {
dev_err(&pdev->dev, "Cannot request IRQ\n");
goto err_clk_disable;
}
if (ret)
return dev_err_probe(&pdev->dev, ret, "Cannot request IRQ\n");
ret = dma_async_device_register(&priv->slave);
if (ret) {
dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
goto err_clk_disable;
}
ret = dmaenginem_async_device_register(&priv->slave);
if (ret)
return dev_err_probe(&pdev->dev, ret,
"Failed to register DMA engine device\n");
ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
priv);
if (ret) {
dev_err(&pdev->dev, "of_dma_controller_register failed\n");
goto err_dma_unregister;
}
if (ret)
return dev_err_probe(&pdev->dev, ret,
"Failed to register translation function\n");
dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");
return 0;
err_dma_unregister:
dma_async_device_unregister(&priv->slave);
err_clk_disable:
clk_disable_unprepare(priv->clk);
return ret;
}
static void sun4i_dma_remove(struct platform_device *pdev)
@ -1380,9 +1363,6 @@ static void sun4i_dma_remove(struct platform_device *pdev)
disable_irq(priv->irq);
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&priv->slave);
clk_disable_unprepare(priv->clk);
}
static struct sun4i_dma_config sun4i_a10_dma_cfg = {

View File

@ -17,7 +17,7 @@ config TI_EDMA
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
select TI_DMA_CROSSBAR if (ARCH_OMAP || COMPILE_TEST)
default y
default ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE
help
Enable support for the TI EDMA (Enhanced DMA) controller. This DMA
engine is found on TI DaVinci, AM33xx, AM43xx, DRA7xx and Keystone 2
@ -29,7 +29,7 @@ config DMA_OMAP
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
select TI_DMA_CROSSBAR if (SOC_DRA7XX || COMPILE_TEST)
default y
default ARCH_OMAP
help
Enable support for the TI sDMA (System DMA or DMA4) controller. This
DMA engine is found on OMAP and DRA7xx parts.