io_uring: only publish fully handled mem region

io_register_mem_region() can try to remove a region right after
publishing it, e.g. when copying the region descriptor back to
userspace fails. This non-atomicity is annoying. Do it in two steps:
create the memory region into a private copy first, and only publish it
once the rest of the handling is done. Remove the now unused
io_create_region_mmap_safe(), which was assumed to be a temporary
solution from day one.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: Gabriel Krisman Bertazi <krisman@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Pavel Begunkov, 2025-10-16 14:23:23 +01:00, committed by Jens Axboe
commit 5b6d8a032e (parent dec10a1ad1), via pull/1354/merge
3 changed files with 18 additions and 26 deletions
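
For context, the pre-patch flow in io_register_mem_region() looked roughly
like the excerpt below (simplified from the code removed by this commit;
only the copy_to_user() failure path is kept, and the comments are added
here for illustration). The region becomes visible to mmap() as soon as
io_create_region_mmap_safe() returns, yet it may still have to be torn
down a few lines later:

	ret = io_create_region_mmap_safe(ctx, &ctx->param_region, &rd,
					 IORING_MAP_OFF_PARAM_REGION);
	/* the region is already published to mmap() at this point ... */
	if (ret)
		return ret;

	if (copy_to_user(rd_uptr, &rd, sizeof(rd))) {
		/*
		 * ... but the copy can still fail, so the just-published
		 * region has to be removed again under ->mmap_lock.
		 */
		guard(mutex)(&ctx->mmap_lock);
		io_free_region(ctx, &ctx->param_region);
		return -EFAULT;
	}

With the patch, io_create_region() fills a function-local struct
io_mapped_region instead, and nothing is visible to mmap() until the
final io_region_publish() call.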

io_uring/memmap.c

@@ -234,27 +234,6 @@ out_free:
 	return ret;
 }
 
-int io_create_region_mmap_safe(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
-				struct io_uring_region_desc *reg,
-				unsigned long mmap_offset)
-{
-	struct io_mapped_region tmp_mr;
-	int ret;
-
-	memcpy(&tmp_mr, mr, sizeof(tmp_mr));
-	ret = io_create_region(ctx, &tmp_mr, reg, mmap_offset);
-	if (ret)
-		return ret;
-
-	/*
-	 * Once published mmap can find it without holding only the ->mmap_lock
-	 * and not ->uring_lock.
-	 */
-	guard(mutex)(&ctx->mmap_lock);
-	memcpy(mr, &tmp_mr, sizeof(tmp_mr));
-	return 0;
-}
-
 static struct io_mapped_region *io_mmap_get_region(struct io_ring_ctx *ctx,
 						   loff_t pgoff)
 {

io_uring/memmap.h

@@ -36,4 +36,16 @@ static inline bool io_region_is_set(struct io_mapped_region *mr)
 	return !!mr->nr_pages;
 }
 
+static inline void io_region_publish(struct io_ring_ctx *ctx,
+				     struct io_mapped_region *src_region,
+				     struct io_mapped_region *dst_region)
+{
+	/*
+	 * Once published, mmap can find it while holding only ->mmap_lock
+	 * and not ->uring_lock.
+	 */
+	guard(mutex)(&ctx->mmap_lock);
+	*dst_region = *src_region;
+}
+
 #endif

io_uring/register.c

@@ -576,6 +576,7 @@ static int io_register_mem_region(struct io_ring_ctx *ctx, void __user *uarg)
 	struct io_uring_mem_region_reg reg;
 	struct io_uring_region_desc __user *rd_uptr;
 	struct io_uring_region_desc rd;
+	struct io_mapped_region region = {};
 	int ret;
 
 	if (io_region_is_set(&ctx->param_region))
@@ -599,20 +600,20 @@ static int io_register_mem_region(struct io_ring_ctx *ctx, void __user *uarg)
 	    !(ctx->flags & IORING_SETUP_R_DISABLED))
 		return -EINVAL;
 
-	ret = io_create_region_mmap_safe(ctx, &ctx->param_region, &rd,
-					 IORING_MAP_OFF_PARAM_REGION);
+	ret = io_create_region(ctx, &region, &rd, IORING_MAP_OFF_PARAM_REGION);
 	if (ret)
 		return ret;
 	if (copy_to_user(rd_uptr, &rd, sizeof(rd))) {
 		guard(mutex)(&ctx->mmap_lock);
-		io_free_region(ctx, &ctx->param_region);
+		io_free_region(ctx, &region);
 		return -EFAULT;
 	}
 
 	if (reg.flags & IORING_MEM_REGION_REG_WAIT_ARG) {
-		ctx->cq_wait_arg = io_region_get_ptr(&ctx->param_region);
+		ctx->cq_wait_arg = io_region_get_ptr(&region);
 		ctx->cq_wait_size = rd.size;
 	}
 
+	io_region_publish(ctx, &region, &ctx->param_region);
 	return 0;
 }