io_uring/zcrx: split common area map/unmap parts
Extract area type dependent parts of io_zcrx_[un]map_area from the generic path. It'll be helpful once there are more area memory types and not only user pages. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Link: https://lore.kernel.org/r/50f6e893e2d20f937e628196cbf528d15f81c289.1746097431.git.asml.silence@gmail.com Signed-off-by: Jens Axboe <axboe@kernel.dk>pull/1250/head
parent
782dfa329a
commit
8a62804248
|
|
@ -82,22 +82,31 @@ static int io_import_area(struct io_zcrx_ifq *ifq,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
|
static void io_zcrx_unmap_umem(struct io_zcrx_ifq *ifq,
|
||||||
struct io_zcrx_area *area, int nr_mapped)
|
struct io_zcrx_area *area, int nr_mapped)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for (i = 0; i < nr_mapped; i++) {
|
for (i = 0; i < nr_mapped; i++) {
|
||||||
struct net_iov *niov = &area->nia.niovs[i];
|
netmem_ref netmem = net_iov_to_netmem(&area->nia.niovs[i]);
|
||||||
dma_addr_t dma;
|
dma_addr_t dma = page_pool_get_dma_addr_netmem(netmem);
|
||||||
|
|
||||||
dma = page_pool_get_dma_addr_netmem(net_iov_to_netmem(niov));
|
|
||||||
dma_unmap_page_attrs(ifq->dev, dma, PAGE_SIZE,
|
dma_unmap_page_attrs(ifq->dev, dma, PAGE_SIZE,
|
||||||
DMA_FROM_DEVICE, IO_DMA_ATTR);
|
DMA_FROM_DEVICE, IO_DMA_ATTR);
|
||||||
net_mp_niov_set_dma_addr(niov, 0);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void __io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
|
||||||
|
struct io_zcrx_area *area, int nr_mapped)
|
||||||
|
{
|
||||||
|
int i;
|
||||||
|
|
||||||
|
io_zcrx_unmap_umem(ifq, area, nr_mapped);
|
||||||
|
|
||||||
|
for (i = 0; i < area->nia.num_niovs; i++)
|
||||||
|
net_mp_niov_set_dma_addr(&area->nia.niovs[i], 0);
|
||||||
|
}
|
||||||
|
|
||||||
static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
|
static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
|
||||||
{
|
{
|
||||||
guard(mutex)(&ifq->dma_lock);
|
guard(mutex)(&ifq->dma_lock);
|
||||||
|
|
@ -107,14 +116,10 @@ static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *are
|
||||||
area->is_mapped = false;
|
area->is_mapped = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
|
static int io_zcrx_map_area_umem(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
guard(mutex)(&ifq->dma_lock);
|
|
||||||
if (area->is_mapped)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
for (i = 0; i < area->nia.num_niovs; i++) {
|
for (i = 0; i < area->nia.num_niovs; i++) {
|
||||||
struct net_iov *niov = &area->nia.niovs[i];
|
struct net_iov *niov = &area->nia.niovs[i];
|
||||||
dma_addr_t dma;
|
dma_addr_t dma;
|
||||||
|
|
@ -129,9 +134,20 @@ static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
return i;
|
||||||
|
}
|
||||||
|
|
||||||
if (i != area->nia.num_niovs) {
|
static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
|
||||||
__io_zcrx_unmap_area(ifq, area, i);
|
{
|
||||||
|
unsigned nr;
|
||||||
|
|
||||||
|
guard(mutex)(&ifq->dma_lock);
|
||||||
|
if (area->is_mapped)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
nr = io_zcrx_map_area_umem(ifq, area);
|
||||||
|
if (nr != area->nia.num_niovs) {
|
||||||
|
__io_zcrx_unmap_area(ifq, area, nr);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue