io_uring/zcrx: rename dma lock
In preparation for reusing the lock for other purposes, rename it to "pp_lock". As before, it can be taken deeper inside the networking stack by the page pool, and so the io_uring syscall side must avoid holding it while doing queue reconfiguration or anything else that can result in immediate page pool init/destruction.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d8d135dfe3
commit 4f602f3112
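The rule described in the commit message is an ordering constraint: perform queue reconfiguration without pp_lock held, and only afterwards take pp_lock for io_uring-side work. A minimal sketch of that constraint follows; it is illustrative only, not code from this patch, and io_zcrx_reconfigure_queue() is a hypothetical stand-in for whatever triggers the real reconfiguration path.

/*
 * Illustrative sketch of the locking rule from the commit message.
 * io_zcrx_reconfigure_queue() is a hypothetical helper, not part of
 * this patch.
 */
static void io_zcrx_example_close(struct io_zcrx_ifq *ifq,
				  struct io_zcrx_area *area)
{
	/*
	 * Queue reconfiguration can immediately destroy and re-create the
	 * page pool, and the page pool path takes pp_lock from inside the
	 * net stack.  Holding pp_lock across this call would risk deadlock,
	 * so the syscall side must not hold it here.
	 */
	io_zcrx_reconfigure_queue(ifq);

	/*
	 * Per-area cleanup takes pp_lock internally (see
	 * io_zcrx_unmap_area() in the diff below), so it runs only after
	 * reconfiguration is done.
	 */
	io_zcrx_unmap_area(ifq, area);
}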
io_uring/zcrx.c

@@ -253,7 +253,7 @@ static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
 {
 	int i;
 
-	guard(mutex)(&ifq->dma_lock);
+	guard(mutex)(&ifq->pp_lock);
 	if (!area->is_mapped)
 		return;
 	area->is_mapped = false;
@@ -273,7 +273,7 @@ static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
 {
 	int ret;
 
-	guard(mutex)(&ifq->dma_lock);
+	guard(mutex)(&ifq->pp_lock);
 	if (area->is_mapped)
 		return 0;
 
@@ -478,7 +478,7 @@ static struct io_zcrx_ifq *io_zcrx_ifq_alloc(struct io_ring_ctx *ctx)
 	ifq->ctx = ctx;
 	spin_lock_init(&ifq->lock);
 	spin_lock_init(&ifq->rq_lock);
-	mutex_init(&ifq->dma_lock);
+	mutex_init(&ifq->pp_lock);
 	return ifq;
 }
 
@@ -527,7 +527,7 @@ static void io_zcrx_ifq_free(struct io_zcrx_ifq *ifq)
 	put_device(ifq->dev);
 
 	io_free_rbuf_ring(ifq);
-	mutex_destroy(&ifq->dma_lock);
+	mutex_destroy(&ifq->pp_lock);
 	kfree(ifq);
 }
 
io_uring/zcrx.h

@@ -54,7 +54,12 @@ struct io_zcrx_ifq {
 	struct net_device		*netdev;
 	netdevice_tracker		netdev_tracker;
 	spinlock_t			lock;
-	struct mutex			dma_lock;
+
+	/*
+	 * Page pool and net configuration lock, can be taken deeper in the
+	 * net stack.
+	 */
+	struct mutex			pp_lock;
 	struct io_mapped_region		region;
 };
 