iio: buffer: support getting dma channel from the buffer

Add a new buffer accessor, .get_dma_dev(), to get the struct device
that actually provides the DMA channel. We cannot assume that the
parent of the IIO device can be used for mapping the DMA buffer. This
becomes important on systems (like the Xilinx/AMD Zynq UltraScale+
MPSoC) where memory (or part of it) is mapped above the 32-bit range.
On such systems, given that a device has a 32-bit DMA mask by default,
we would otherwise need to rely on bounce buffers (via swiotlb) to map
memory above the DMA mask limit.

In the process, add an iio_buffer_get_dma_dev() helper that returns the
proper DMA device: the buffer's .get_dma_dev() when implemented, with a
fallback to the IIO device's parent otherwise.
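
For illustration only (a hedged sketch, not part of this patch: struct
dmaengine_buffer, iio_buffer_to_dmaengine_buffer() and the ->chan member
are assumed from the dmaengine buffer code), a dmaengine-based buffer
could implement the accessor by returning the device backing its DMA
channel:

  static struct device *
  iio_dmaengine_buffer_get_dma_dev(struct iio_buffer *buffer)
  {
  	struct dmaengine_buffer *dmaengine_buffer =
  		iio_buffer_to_dmaengine_buffer(buffer);

  	/*
  	 * Return the device that owns the DMA channel; it may differ
  	 * from the IIO device's parent and may have a wider DMA mask.
  	 */
  	return dmaengine_buffer->chan->device->dev;
  }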

Cc: stable@vger.kernel.org
Reviewed-by: David Lechner <dlechner@baylibre.com>
Signed-off-by: Nuno Sá <nuno.sa@analog.com>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
---
 2 files changed, 18 insertions(+), 5 deletions(-)

diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -1623,19 +1623,28 @@ static int iio_dma_resv_lock(struct dma_buf *dmabuf, bool nonblock)
 	return 0;
 }
 
+static struct device *iio_buffer_get_dma_dev(const struct iio_dev *indio_dev,
+					     struct iio_buffer *buffer)
+{
+	if (buffer->access->get_dma_dev)
+		return buffer->access->get_dma_dev(buffer);
+
+	return indio_dev->dev.parent;
+}
+
 static struct dma_buf_attachment *
 iio_buffer_find_attachment(struct iio_dev_buffer_pair *ib,
 			   struct dma_buf *dmabuf, bool nonblock)
 {
-	struct device *dev = ib->indio_dev->dev.parent;
 	struct iio_buffer *buffer = ib->buffer;
+	struct device *dma_dev = iio_buffer_get_dma_dev(ib->indio_dev, buffer);
 	struct dma_buf_attachment *attach = NULL;
 	struct iio_dmabuf_priv *priv;
 
 	guard(mutex)(&buffer->dmabufs_mutex);
 
 	list_for_each_entry(priv, &buffer->dmabufs, entry) {
-		if (priv->attach->dev == dev
+		if (priv->attach->dev == dma_dev
 		    && priv->attach->dmabuf == dmabuf) {
 			attach = priv->attach;
 			break;
@@ -1653,6 +1662,7 @@ static int iio_buffer_attach_dmabuf(struct iio_dev_buffer_pair *ib,
 {
 	struct iio_dev *indio_dev = ib->indio_dev;
 	struct iio_buffer *buffer = ib->buffer;
+	struct device *dma_dev = iio_buffer_get_dma_dev(indio_dev, buffer);
 	struct dma_buf_attachment *attach;
 	struct iio_dmabuf_priv *priv, *each;
 	struct dma_buf *dmabuf;
@@ -1679,7 +1689,7 @@ static int iio_buffer_attach_dmabuf(struct iio_dev_buffer_pair *ib,
 		goto err_free_priv;
 	}
 
-	attach = dma_buf_attach(dmabuf, indio_dev->dev.parent);
+	attach = dma_buf_attach(dmabuf, dma_dev);
 	if (IS_ERR(attach)) {
 		err = PTR_ERR(attach);
 		goto err_dmabuf_put;
@@ -1719,7 +1729,7 @@ static int iio_buffer_attach_dmabuf(struct iio_dev_buffer_pair *ib,
 	 * combo. If we do, refuse to attach.
 	 */
 	list_for_each_entry(each, &buffer->dmabufs, entry) {
-		if (each->attach->dev == indio_dev->dev.parent
+		if (each->attach->dev == dma_dev
 		    && each->attach->dmabuf == dmabuf) {
 			/*
 			 * We unlocked the reservation object, so going through
@@ -1758,6 +1768,7 @@ static int iio_buffer_detach_dmabuf(struct iio_dev_buffer_pair *ib,
 {
 	struct iio_buffer *buffer = ib->buffer;
 	struct iio_dev *indio_dev = ib->indio_dev;
+	struct device *dma_dev = iio_buffer_get_dma_dev(indio_dev, buffer);
 	struct iio_dmabuf_priv *priv;
 	struct dma_buf *dmabuf;
 	int dmabuf_fd, ret = -EPERM;
@@ -1772,7 +1783,7 @@ static int iio_buffer_detach_dmabuf(struct iio_dev_buffer_pair *ib,
 	guard(mutex)(&buffer->dmabufs_mutex);
 
 	list_for_each_entry(priv, &buffer->dmabufs, entry) {
-		if (priv->attach->dev == indio_dev->dev.parent
+		if (priv->attach->dev == dma_dev
 		    && priv->attach->dmabuf == dmabuf) {
 			list_del(&priv->entry);
 
diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
--- a/include/linux/iio/buffer_impl.h
+++ b/include/linux/iio/buffer_impl.h
@@ -50,6 +50,7 @@ struct sg_table;
  * @enqueue_dmabuf:	called from userspace via ioctl to queue this DMABUF
  *			object to this buffer. Requires a valid DMABUF fd, that
  *			was previously attached to this buffer.
+ * @get_dma_dev:	called to get the device providing the DMA channel of this buffer.
  * @lock_queue:	called when the core needs to lock the buffer queue;
  *			it is used when enqueueing DMABUF objects.
  * @unlock_queue:	used to unlock a previously locked buffer queue
@@ -90,6 +91,7 @@ struct iio_buffer_access_funcs {
 			  struct iio_dma_buffer_block *block,
 			  struct dma_fence *fence, struct sg_table *sgt,
 			  size_t size, bool cyclic);
+	struct device * (*get_dma_dev)(struct iio_buffer *buffer);
 	void (*lock_queue)(struct iio_buffer *buffer);
 	void (*unlock_queue)(struct iio_buffer *buffer);
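
For completeness, a buffer implementation would then hook the new op
into its access functions; buffers that do not implement it keep the
indio_dev->dev.parent fallback. A hedged sketch (the
iio_dmaengine_buffer_* names are assumptions, mirroring the example
above, not part of this patch):

  static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
  	/* ... existing ops (read, enqueue_dmabuf, ...) unchanged ... */
  	.get_dma_dev = iio_dmaengine_buffer_get_dma_dev,
  };

With this in place, dma_buf_attach() is called with the DMA channel's
device, so mappings honour that device's DMA mask rather than the
32-bit default of the IIO parent.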