io_uring/zcrx: return error from io_zcrx_map_area_*

io_zcrx_map_area_*() helpers return the number of processed niovs, which
we use to unroll some of the mappings for user memory areas. It's
unhandy, and dmabuf doesn't care about it. Return an error code instead
and move failure partial unmapping into io_zcrx_map_area_umem().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: David Wei <dw@davidwei.uk>
Link: https://lore.kernel.org/r/42668e82be3a84b07ee8fc76d1d6d5ac0f137fe5.1751466461.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
pull/1309/head
Pavel Begunkov 2025-07-02 15:29:05 +01:00 committed by Jens Axboe
parent e9a9ddb15b
commit 06897ddfc5
1 changed file with 14 additions and 13 deletions

View File

@ -141,13 +141,13 @@ static int io_zcrx_map_area_dmabuf(struct io_zcrx_ifq *ifq, struct io_zcrx_area
struct net_iov *niov = &area->nia.niovs[niov_idx];
if (net_mp_niov_set_dma_addr(niov, dma))
return 0;
return -EFAULT;
sg_len -= PAGE_SIZE;
dma += PAGE_SIZE;
niov_idx++;
}
}
return niov_idx;
return 0;
}
static int io_import_umem(struct io_zcrx_ifq *ifq,
@ -256,29 +256,30 @@ static int io_zcrx_map_area_umem(struct io_zcrx_ifq *ifq, struct io_zcrx_area *a
break;
}
}
return i;
if (i != area->nia.num_niovs) {
__io_zcrx_unmap_area(ifq, area, i);
return -EINVAL;
}
return 0;
}
/*
 * Map an area's niovs for DMA, dispatching on the backing memory type.
 *
 * Defect fixed: the diff rendering lost its +/- markers, interleaving the
 * pre-patch lines (unsigned nr; nr = ...; the "nr != num_niovs" unmap block)
 * with the post-patch lines (int ret; ret = ...; if (ret == 0) ...), which is
 * not valid C (two statements under an un-braced if before an else). This is
 * the reconstructed post-patch version described by the commit message:
 * io_zcrx_map_area_*() now return 0 or a negative error code, and partial
 * unmapping on failure is handled inside io_zcrx_map_area_umem() itself.
 *
 * Returns 0 on success (or if already mapped), negative error code otherwise.
 */
static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
{
	int ret;

	/* Serialize mapping against concurrent map/unmap of this ifq. */
	guard(mutex)(&ifq->dma_lock);
	if (area->is_mapped)
		return 0;

	if (area->mem.is_dmabuf)
		ret = io_zcrx_map_area_dmabuf(ifq, area);
	else
		ret = io_zcrx_map_area_umem(ifq, area);

	/* Helpers clean up after themselves on failure; just record success. */
	if (ret == 0)
		area->is_mapped = true;
	return ret;
}
static void io_zcrx_sync_for_device(const struct page_pool *pool,