io_uring: only publish fully handled mem region
io_register_mem_region() can end up having to remove a region right after publishing it. This non-atomicity is annoying. Do it in two steps: create the memory region first, and publish it only once the rest of the handling is done. Remove the now unused io_create_region_mmap_safe(), which was assumed to be a temporary solution from day one.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: Gabriel Krisman Bertazi <krisman@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 5b6d8a032e (parent dec10a1ad1)
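The change boils down to a simple pattern: build the region in a private struct, finish everything that can still fail, and only then copy it into the slot that readers (mmap) can see, taking the same lock the readers take. Below is a minimal userspace sketch of that pattern; none of the names (struct registry, region_create(), region_publish(), ...) come from io_uring, they only mirror the shape of the change, with publish_lock standing in for ctx->mmap_lock and "published" for ctx->param_region.

/*
 * Hypothetical, userspace-only sketch of the create-then-publish pattern.
 */
#include <pthread.h>
#include <stdlib.h>

struct region {
	void	*ptr;
	size_t	 size;
};

struct registry {
	pthread_mutex_t	publish_lock;	/* readers take this before looking */
	struct region	published;	/* only ever holds fully set up regions */
};

static int region_create(struct region *r, size_t size)
{
	r->ptr = calloc(1, size);
	if (!r->ptr)
		return -1;
	r->size = size;
	return 0;
}

static void region_publish(struct registry *reg, const struct region *src)
{
	/* One plain copy under the lock is all "publishing" amounts to. */
	pthread_mutex_lock(&reg->publish_lock);
	reg->published = *src;
	pthread_mutex_unlock(&reg->publish_lock);
}

static int register_region(struct registry *reg, size_t size)
{
	struct region tmp = { 0 };	/* private, invisible to readers */

	if (region_create(&tmp, size))
		return -1;

	/* ... any further handling that may still fail goes here ... */

	/* Nothing failed, so make the region visible; no rollback needed. */
	region_publish(reg, &tmp);
	return 0;
}

int main(void)
{
	struct registry reg = { .publish_lock = PTHREAD_MUTEX_INITIALIZER };

	return register_region(&reg, 4096) ? 1 : 0;
}

The point of ordering things this way is that no unpublish path is needed anymore: on any failure the region is still private and can simply be freed, which is exactly what the diff below does for io_register_mem_region().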
--- a/io_uring/memmap.c
+++ b/io_uring/memmap.c
@@ -234,27 +234,6 @@ out_free:
 	return ret;
 }
 
-int io_create_region_mmap_safe(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
-				struct io_uring_region_desc *reg,
-				unsigned long mmap_offset)
-{
-	struct io_mapped_region tmp_mr;
-	int ret;
-
-	memcpy(&tmp_mr, mr, sizeof(tmp_mr));
-	ret = io_create_region(ctx, &tmp_mr, reg, mmap_offset);
-	if (ret)
-		return ret;
-
-	/*
-	 * Once published mmap can find it without holding only the ->mmap_lock
-	 * and not ->uring_lock.
-	 */
-	guard(mutex)(&ctx->mmap_lock);
-	memcpy(mr, &tmp_mr, sizeof(tmp_mr));
-	return 0;
-}
-
 static struct io_mapped_region *io_mmap_get_region(struct io_ring_ctx *ctx,
 						   loff_t pgoff)
 {
--- a/io_uring/memmap.h
+++ b/io_uring/memmap.h
@@ -36,4 +36,16 @@ static inline bool io_region_is_set(struct io_mapped_region *mr)
 	return !!mr->nr_pages;
 }
 
+static inline void io_region_publish(struct io_ring_ctx *ctx,
+				     struct io_mapped_region *src_region,
+				     struct io_mapped_region *dst_region)
+{
+	/*
+	 * Once published mmap can find it without holding only the ->mmap_lock
+	 * and not ->uring_lock.
+	 */
+	guard(mutex)(&ctx->mmap_lock);
+	*dst_region = *src_region;
+}
+
 #endif
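io_region_publish() leans on the kernel's scope-based guard(mutex)() helper from <linux/cleanup.h>: the mutex is acquired at the guard statement and released automatically when the enclosing scope ends. As a rough, hypothetical open-coded sketch of what the helper amounts to (the real code relies on the cleanup machinery rather than an explicit unlock):

/* Hypothetical open-coded equivalent of io_region_publish(); sketch only. */
static inline void io_region_publish_open_coded(struct io_ring_ctx *ctx,
						struct io_mapped_region *src_region,
						struct io_mapped_region *dst_region)
{
	mutex_lock(&ctx->mmap_lock);
	*dst_region = *src_region;	/* copy becomes visible to mmap lookups */
	mutex_unlock(&ctx->mmap_lock);
}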
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -576,6 +576,7 @@ static int io_register_mem_region(struct io_ring_ctx *ctx, void __user *uarg)
 	struct io_uring_mem_region_reg reg;
 	struct io_uring_region_desc __user *rd_uptr;
 	struct io_uring_region_desc rd;
+	struct io_mapped_region region = {};
 	int ret;
 
 	if (io_region_is_set(&ctx->param_region))
@@ -599,20 +600,20 @@ static int io_register_mem_region(struct io_ring_ctx *ctx, void __user *uarg)
 	    !(ctx->flags & IORING_SETUP_R_DISABLED))
 		return -EINVAL;
 
-	ret = io_create_region_mmap_safe(ctx, &ctx->param_region, &rd,
-					 IORING_MAP_OFF_PARAM_REGION);
+	ret = io_create_region(ctx, &region, &rd, IORING_MAP_OFF_PARAM_REGION);
 	if (ret)
 		return ret;
 	if (copy_to_user(rd_uptr, &rd, sizeof(rd))) {
-		guard(mutex)(&ctx->mmap_lock);
-		io_free_region(ctx, &ctx->param_region);
+		io_free_region(ctx, &region);
 		return -EFAULT;
 	}
 
 	if (reg.flags & IORING_MEM_REGION_REG_WAIT_ARG) {
-		ctx->cq_wait_arg = io_region_get_ptr(&ctx->param_region);
+		ctx->cq_wait_arg = io_region_get_ptr(&region);
 		ctx->cq_wait_size = rd.size;
 	}
 
+	io_region_publish(ctx, &region, &ctx->param_region);
 	return 0;
 }