ceph: make ceph_start_io_*() killable

This allows killing a process that is waiting for an I/O lock while
another process is stuck waiting for the Ceph server.  This is similar
to the NFS commit 38a125b315 ("fs/nfs/io: make nfs_start_io_*() killable").
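
As a rough illustration of the calling convention this introduces (the
snippet below is not part of the patch, and example_locked_read() is a
made-up name), callers must now check the return value; the error comes
straight from down_read_killable()/down_write_killable(), i.e. -EINTR
when the waiting task receives a fatal signal:

/*
 * Hypothetical caller, sketching the new ceph_start_io_*() contract.
 * Only ceph_start_io_read()/ceph_end_io_read() are real names here.
 */
static int example_locked_read(struct inode *inode)
{
	int ret;

	/* May now fail with -EINTR instead of blocking unkillably. */
	ret = ceph_start_io_read(inode);
	if (ret)
		return ret;

	/* ... perform the read while holding the I/O lock ... */

	ceph_end_io_read(inode);
	return 0;
}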

[ idryomov: drop comment on include, formatting ]

Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
Reviewed-by: Ilya Dryomov <idryomov@gmail.com>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
---
 3 files changed, 49 insertions(+), 26 deletions(-)

diff --git a/fs/ceph/file.c b/fs/ceph/file.c
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -2121,10 +2121,10 @@ again:
 	if (ceph_inode_is_shutdown(inode))
 		return -ESTALE;
 
-	if (direct_lock)
-		ceph_start_io_direct(inode);
-	else
-		ceph_start_io_read(inode);
+	ret = direct_lock ? ceph_start_io_direct(inode) :
+			    ceph_start_io_read(inode);
+	if (ret)
+		return ret;
 
 	if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
 		want |= CEPH_CAP_FILE_CACHE;
@@ -2277,7 +2277,9 @@ static ssize_t ceph_splice_read(struct file *in, loff_t *ppos,
 	    (fi->flags & CEPH_F_SYNC))
 		return copy_splice_read(in, ppos, pipe, len, flags);
 
-	ceph_start_io_read(inode);
+	ret = ceph_start_io_read(inode);
+	if (ret)
+		return ret;
 
 	want = CEPH_CAP_FILE_CACHE;
 	if (fi->fmode & CEPH_FILE_MODE_LAZY)
@@ -2356,10 +2358,10 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
 		direct_lock = true;
 
 retry_snap:
-	if (direct_lock)
-		ceph_start_io_direct(inode);
-	else
-		ceph_start_io_write(inode);
+	err = direct_lock ? ceph_start_io_direct(inode) :
+			    ceph_start_io_write(inode);
+	if (err)
+		goto out_unlocked;
 
 	if (iocb->ki_flags & IOCB_APPEND) {
 		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
diff --git a/fs/ceph/io.c b/fs/ceph/io.c
--- a/fs/ceph/io.c
+++ b/fs/ceph/io.c
@@ -47,20 +47,29 @@ static void ceph_block_o_direct(struct ceph_inode_info *ci, struct inode *inode)
  * Note that buffered writes and truncates both take a write lock on
  * inode->i_rwsem, meaning that those are serialised w.r.t. the reads.
  */
-void
-ceph_start_io_read(struct inode *inode)
+int ceph_start_io_read(struct inode *inode)
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
+	int err;
 
 	/* Be an optimist! */
-	down_read(&inode->i_rwsem);
+	err = down_read_killable(&inode->i_rwsem);
+	if (err)
+		return err;
+
 	if (!(READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT))
-		return;
+		return 0;
+
 	up_read(&inode->i_rwsem);
+
 	/* Slow path.... */
-	down_write(&inode->i_rwsem);
+	err = down_write_killable(&inode->i_rwsem);
+	if (err)
+		return err;
+
 	ceph_block_o_direct(ci, inode);
 	downgrade_write(&inode->i_rwsem);
+	return 0;
 }
 
 /**
@@ -83,11 +92,12 @@ ceph_end_io_read(struct inode *inode)
  * Declare that a buffered write operation is about to start, and ensure
  * that we block all direct I/O.
  */
-void
-ceph_start_io_write(struct inode *inode)
+int ceph_start_io_write(struct inode *inode)
 {
-	down_write(&inode->i_rwsem);
-	ceph_block_o_direct(ceph_inode(inode), inode);
+	int err = down_write_killable(&inode->i_rwsem);
+	if (!err)
+		ceph_block_o_direct(ceph_inode(inode), inode);
+	return err;
 }
 
 /**
@@ -133,20 +143,29 @@ static void ceph_block_buffered(struct ceph_inode_info *ci, struct inode *inode)
  * Note that buffered writes and truncates both take a write lock on
  * inode->i_rwsem, meaning that those are serialised w.r.t. O_DIRECT.
  */
-void
-ceph_start_io_direct(struct inode *inode)
+int ceph_start_io_direct(struct inode *inode)
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
+	int err;
 
 	/* Be an optimist! */
-	down_read(&inode->i_rwsem);
+	err = down_read_killable(&inode->i_rwsem);
+	if (err)
+		return err;
+
 	if (READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT)
-		return;
+		return 0;
+
 	up_read(&inode->i_rwsem);
+
 	/* Slow path.... */
-	down_write(&inode->i_rwsem);
+	err = down_write_killable(&inode->i_rwsem);
+	if (err)
+		return err;
+
 	ceph_block_buffered(ci, inode);
 	downgrade_write(&inode->i_rwsem);
+	return 0;
 }
 
 /**
diff --git a/fs/ceph/io.h b/fs/ceph/io.h
--- a/fs/ceph/io.h
+++ b/fs/ceph/io.h
@@ -2,11 +2,13 @@
 #ifndef _FS_CEPH_IO_H
 #define _FS_CEPH_IO_H
 
-void ceph_start_io_read(struct inode *inode);
+#include <linux/compiler_attributes.h>
+
+int __must_check ceph_start_io_read(struct inode *inode);
 void ceph_end_io_read(struct inode *inode);
-void ceph_start_io_write(struct inode *inode);
+int __must_check ceph_start_io_write(struct inode *inode);
 void ceph_end_io_write(struct inode *inode);
-void ceph_start_io_direct(struct inode *inode);
+int __must_check ceph_start_io_direct(struct inode *inode);
 void ceph_end_io_direct(struct inode *inode);
 
 #endif /* FS_CEPH_IO_H */