vfs-6.16-rc1.netfs

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQRAhzRXHqcMeLMyaSiRxhvAZXjcogUCaDBPUAAKCRCRxhvAZXjc
 ouMEAQCrviYPG/WMtPTH7nBIbfVQTfNEXt/TvN7u7OjXb+RwRAEAwe9tLy4GrS/t
 GuvUPWAthbhs77LTvxj6m3Gf49BOVgQ=
 =6FqN
 -----END PGP SIGNATURE-----

Merge tag 'vfs-6.16-rc1.netfs' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull netfs updates from Christian Brauner:

 - The main API document has been extensively updated/rewritten

 - Fix an oops in write-retry due to mis-resetting the I/O iterator

 - Fix the recording of transferred bytes for short DIO reads

 - Fix a request's work item to not require a reference, thereby
   avoiding the need to get rid of it in BH/IRQ context

 - Fix waiting and waking to be consistent about the waitqueue used

 - Remove NETFS_SREQ_SEEK_DATA_READ, NETFS_INVALID_WRITE,
   NETFS_ICTX_WRITETHROUGH, NETFS_READ_HOLE_CLEAR,
   NETFS_RREQ_DONT_UNLOCK_FOLIOS, and NETFS_RREQ_BLOCKED

 - Reorder structs to eliminate holes

 - Remove netfs_io_request::ractl

 - Only provide proc_link field if CONFIG_PROC_FS=y

 - Remove folio_queue::marks3

 - Fix undifferentiation of DIO reads from unbuffered reads

* tag 'vfs-6.16-rc1.netfs' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  netfs: Fix undifferentiation of DIO reads from unbuffered reads
  netfs: Fix wait/wake to be consistent about the waitqueue used
  netfs: Fix the request's work item to not require a ref
  netfs: Fix setting of transferred bytes with short DIO reads
  netfs: Fix oops in write-retry from mis-resetting the subreq iterator
  fs/netfs: remove unused flag NETFS_RREQ_BLOCKED
  fs/netfs: remove unused flag NETFS_RREQ_DONT_UNLOCK_FOLIOS
  folio_queue: remove unused field `marks3`
  fs/netfs: declare field `proc_link` only if CONFIG_PROC_FS=y
  fs/netfs: remove `netfs_io_request.ractl`
  fs/netfs: reorder struct fields to eliminate holes
  fs/netfs: remove unused enum choice NETFS_READ_HOLE_CLEAR
  fs/netfs: remove unused flag NETFS_ICTX_WRITETHROUGH
  fs/netfs: remove unused source NETFS_INVALID_WRITE
  fs/netfs: remove unused flag NETFS_SREQ_SEEK_DATA_READ
Linus Torvalds 2025-06-02 15:04:06 -07:00
commit 0fb34422b5
33 changed files with 478 additions and 487 deletions
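
[Editorial note] A large share of the mechanical churn in the diff below comes from the work-item and refcount rework summarized above: request cleanup no longer happens in BH/IRQ context, so the was_async flag threaded through every termination path is dropped. A minimal before/after sketch of the shape of that change (illustrative only; the real prototypes live in the netfs headers, which are not part of this excerpt):

	/* old: callers had to say whether they were running in softirq context */
	void netfs_write_subrequest_terminated(struct netfs_io_subrequest *subreq,
					       ssize_t transferred_or_error,
					       bool was_async);

	/* new: termination always runs in process or workqueue context */
	void netfs_write_subrequest_terminated(struct netfs_io_subrequest *subreq,
					       ssize_t transferred_or_error);

The cache end-I/O callbacks (cachefiles, erofs, fscache) lose the same parameter, as the per-file hunks show.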


@@ -151,19 +151,16 @@ The marks can be set by::

 	void folioq_mark(struct folio_queue *folioq, unsigned int slot);
 	void folioq_mark2(struct folio_queue *folioq, unsigned int slot);
-	void folioq_mark3(struct folio_queue *folioq, unsigned int slot);

 Cleared by::

 	void folioq_unmark(struct folio_queue *folioq, unsigned int slot);
 	void folioq_unmark2(struct folio_queue *folioq, unsigned int slot);
-	void folioq_unmark3(struct folio_queue *folioq, unsigned int slot);

 And the marks can be queried by::

 	bool folioq_is_marked(const struct folio_queue *folioq, unsigned int slot);
 	bool folioq_is_marked2(const struct folio_queue *folioq, unsigned int slot);
-	bool folioq_is_marked3(const struct folio_queue *folioq, unsigned int slot);

 The marks can be used for any purpose and are not interpreted by this API.
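
[Editorial note] With the third mark gone, folio_queue users have only the two remaining lazy marks. A hypothetical caller, using just the accessors documented above (the helper name and the meanings assigned to the marks are invented for illustration):

	static void note_folio_state(struct folio_queue *fq, unsigned int slot)
	{
		/* mark 1: e.g. "folio still needs to be unlocked" */
		folioq_mark(fq, slot);

		/* mark 2: e.g. "copy to the cache has completed" */
		if (folioq_is_marked2(fq, slot))
			folioq_unmark2(fq, slot);
	}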


@@ -712,11 +712,6 @@ handle falling back from one source type to another. The members are:
     at a boundary with the filesystem structure (e.g. at the end of a Ceph
     object). It tells netfslib not to retile subrequests across it.

-   * ``NETFS_SREQ_SEEK_DATA_READ``
-
-     This is a hint from netfslib to the cache that it might want to try
-     skipping ahead to the next data (ie. using SEEK_DATA).
-
    * ``error``

      This is for the filesystem to store result of the subrequest. It should be


@@ -59,7 +59,7 @@ static void v9fs_issue_write(struct netfs_io_subrequest *subreq)
 	len = p9_client_write(fid, subreq->start, &subreq->io_iter, &err);
 	if (len > 0)
 		__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
-	netfs_write_subrequest_terminated(subreq, len ?: err, false);
+	netfs_write_subrequest_terminated(subreq, len ?: err);
 }

 /**
@@ -77,7 +77,8 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
 	/* if we just extended the file size, any portion not in
 	 * cache won't be on server and is zeroes */
-	if (subreq->rreq->origin != NETFS_DIO_READ)
+	if (subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
+	    subreq->rreq->origin != NETFS_DIO_READ)
 		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
 	if (pos + total >= i_size_read(rreq->inode))
 		__set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
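
[Editorial note] The widened check above is the filesystem-visible half of the new NETFS_UNBUFFERED_READ origin: an iterator read that bypasses the pagecache is only classed as DIO when the iocb actually carries IOCB_DIRECT (see the unbuffered/direct read hunk further down), and both origins must now be excluded before zero-filling the tail of a short read. A condensed sketch of the distinction, assuming an iocb in hand:

	enum netfs_io_origin origin = (iocb->ki_flags & IOCB_DIRECT) ?
				      NETFS_DIO_READ : NETFS_UNBUFFERED_READ;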


@ -120,17 +120,17 @@ static void afs_issue_write_worker(struct work_struct *work)
#if 0 // Error injection #if 0 // Error injection
if (subreq->debug_index == 3) if (subreq->debug_index == 3)
return netfs_write_subrequest_terminated(subreq, -ENOANO, false); return netfs_write_subrequest_terminated(subreq, -ENOANO);
if (!subreq->retry_count) { if (!subreq->retry_count) {
set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
return netfs_write_subrequest_terminated(subreq, -EAGAIN, false); return netfs_write_subrequest_terminated(subreq, -EAGAIN);
} }
#endif #endif
op = afs_alloc_operation(wreq->netfs_priv, vnode->volume); op = afs_alloc_operation(wreq->netfs_priv, vnode->volume);
if (IS_ERR(op)) if (IS_ERR(op))
return netfs_write_subrequest_terminated(subreq, -EAGAIN, false); return netfs_write_subrequest_terminated(subreq, -EAGAIN);
afs_op_set_vnode(op, 0, vnode); afs_op_set_vnode(op, 0, vnode);
op->file[0].dv_delta = 1; op->file[0].dv_delta = 1;
@ -166,7 +166,7 @@ static void afs_issue_write_worker(struct work_struct *work)
break; break;
} }
netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len, false); netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len);
} }
void afs_issue_write(struct netfs_io_subrequest *subreq) void afs_issue_write(struct netfs_io_subrequest *subreq)
@ -202,6 +202,7 @@ void afs_retry_request(struct netfs_io_request *wreq, struct netfs_io_stream *st
case NETFS_READ_GAPS: case NETFS_READ_GAPS:
case NETFS_READ_SINGLE: case NETFS_READ_SINGLE:
case NETFS_READ_FOR_WRITE: case NETFS_READ_FOR_WRITE:
case NETFS_UNBUFFERED_READ:
case NETFS_DIO_READ: case NETFS_DIO_READ:
return; return;
default: default:


@ -63,7 +63,7 @@ static void cachefiles_read_complete(struct kiocb *iocb, long ret)
ret = -ESTALE; ret = -ESTALE;
} }
ki->term_func(ki->term_func_priv, ret, ki->was_async); ki->term_func(ki->term_func_priv, ret);
} }
cachefiles_put_kiocb(ki); cachefiles_put_kiocb(ki);
@ -188,7 +188,7 @@ in_progress:
presubmission_error: presubmission_error:
if (term_func) if (term_func)
term_func(term_func_priv, ret < 0 ? ret : skipped, false); term_func(term_func_priv, ret < 0 ? ret : skipped);
return ret; return ret;
} }
@ -271,7 +271,7 @@ static void cachefiles_write_complete(struct kiocb *iocb, long ret)
atomic_long_sub(ki->b_writing, &object->volume->cache->b_writing); atomic_long_sub(ki->b_writing, &object->volume->cache->b_writing);
set_bit(FSCACHE_COOKIE_HAVE_DATA, &object->cookie->flags); set_bit(FSCACHE_COOKIE_HAVE_DATA, &object->cookie->flags);
if (ki->term_func) if (ki->term_func)
ki->term_func(ki->term_func_priv, ret, ki->was_async); ki->term_func(ki->term_func_priv, ret);
cachefiles_put_kiocb(ki); cachefiles_put_kiocb(ki);
} }
@ -301,7 +301,7 @@ int __cachefiles_write(struct cachefiles_object *object,
ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL); ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL);
if (!ki) { if (!ki) {
if (term_func) if (term_func)
term_func(term_func_priv, -ENOMEM, false); term_func(term_func_priv, -ENOMEM);
return -ENOMEM; return -ENOMEM;
} }
@ -366,7 +366,7 @@ static int cachefiles_write(struct netfs_cache_resources *cres,
{ {
if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE)) { if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE)) {
if (term_func) if (term_func)
term_func(term_func_priv, -ENOBUFS, false); term_func(term_func_priv, -ENOBUFS);
trace_netfs_sreq(term_func_priv, netfs_sreq_trace_cache_nowrite); trace_netfs_sreq(term_func_priv, netfs_sreq_trace_cache_nowrite);
return -ENOBUFS; return -ENOBUFS;
} }
@ -665,7 +665,7 @@ static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
pre = CACHEFILES_DIO_BLOCK_SIZE - off; pre = CACHEFILES_DIO_BLOCK_SIZE - off;
if (pre >= len) { if (pre >= len) {
fscache_count_dio_misfit(); fscache_count_dio_misfit();
netfs_write_subrequest_terminated(subreq, len, false); netfs_write_subrequest_terminated(subreq, len);
return; return;
} }
subreq->transferred += pre; subreq->transferred += pre;
@ -691,7 +691,7 @@ static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
len -= post; len -= post;
if (len == 0) { if (len == 0) {
fscache_count_dio_misfit(); fscache_count_dio_misfit();
netfs_write_subrequest_terminated(subreq, post, false); netfs_write_subrequest_terminated(subreq, post);
return; return;
} }
iov_iter_truncate(&subreq->io_iter, len); iov_iter_truncate(&subreq->io_iter, len);
@ -703,7 +703,7 @@ static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
&start, &len, len, true); &start, &len, len, true);
cachefiles_end_secure(cache, saved_cred); cachefiles_end_secure(cache, saved_cred);
if (ret < 0) { if (ret < 0) {
netfs_write_subrequest_terminated(subreq, ret, false); netfs_write_subrequest_terminated(subreq, ret);
return; return;
} }


@ -238,6 +238,7 @@ static void finish_netfs_read(struct ceph_osd_request *req)
if (sparse && err > 0) if (sparse && err > 0)
err = ceph_sparse_ext_map_end(op); err = ceph_sparse_ext_map_end(op);
if (err < subreq->len && if (err < subreq->len &&
subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
subreq->rreq->origin != NETFS_DIO_READ) subreq->rreq->origin != NETFS_DIO_READ)
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
if (IS_ENCRYPTED(inode) && err > 0) { if (IS_ENCRYPTED(inode) && err > 0) {
@ -281,7 +282,8 @@ static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
size_t len; size_t len;
int mode; int mode;
if (rreq->origin != NETFS_DIO_READ) if (rreq->origin != NETFS_UNBUFFERED_READ &&
rreq->origin != NETFS_DIO_READ)
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags); __clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
@ -539,7 +541,7 @@ static void ceph_set_page_fscache(struct page *page)
folio_start_private_2(page_folio(page)); /* [DEPRECATED] */ folio_start_private_2(page_folio(page)); /* [DEPRECATED] */
} }
static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async) static void ceph_fscache_write_terminated(void *priv, ssize_t error)
{ {
struct inode *inode = priv; struct inode *inode = priv;


@@ -102,8 +102,7 @@ static void erofs_fscache_req_io_put(struct erofs_fscache_io *io)
 	erofs_fscache_req_put(req);
 }

-static void erofs_fscache_req_end_io(void *priv,
-				     ssize_t transferred_or_error, bool was_async)
+static void erofs_fscache_req_end_io(void *priv, ssize_t transferred_or_error)
 {
 	struct erofs_fscache_io *io = priv;
 	struct erofs_fscache_rq *req = io->private;
@@ -180,8 +179,7 @@ struct erofs_fscache_bio {
 	struct bio_vec bvecs[BIO_MAX_VECS];
 };

-static void erofs_fscache_bio_endio(void *priv,
-				    ssize_t transferred_or_error, bool was_async)
+static void erofs_fscache_bio_endio(void *priv, ssize_t transferred_or_error)
 {
 	struct erofs_fscache_bio *io = priv;


@ -78,7 +78,8 @@ static int netfs_begin_cache_read(struct netfs_io_request *rreq, struct netfs_in
* [!] NOTE: This must be run in the same thread as ->issue_read() was called * [!] NOTE: This must be run in the same thread as ->issue_read() was called
* in as we access the readahead_control struct. * in as we access the readahead_control struct.
*/ */
static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq) static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq,
struct readahead_control *ractl)
{ {
struct netfs_io_request *rreq = subreq->rreq; struct netfs_io_request *rreq = subreq->rreq;
size_t rsize = subreq->len; size_t rsize = subreq->len;
@ -86,7 +87,7 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
if (subreq->source == NETFS_DOWNLOAD_FROM_SERVER) if (subreq->source == NETFS_DOWNLOAD_FROM_SERVER)
rsize = umin(rsize, rreq->io_streams[0].sreq_max_len); rsize = umin(rsize, rreq->io_streams[0].sreq_max_len);
if (rreq->ractl) { if (ractl) {
/* If we don't have sufficient folios in the rolling buffer, /* If we don't have sufficient folios in the rolling buffer,
* extract a folioq's worth from the readahead region at a time * extract a folioq's worth from the readahead region at a time
* into the buffer. Note that this acquires a ref on each page * into the buffer. Note that this acquires a ref on each page
@ -99,7 +100,7 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
while (rreq->submitted < subreq->start + rsize) { while (rreq->submitted < subreq->start + rsize) {
ssize_t added; ssize_t added;
added = rolling_buffer_load_from_ra(&rreq->buffer, rreq->ractl, added = rolling_buffer_load_from_ra(&rreq->buffer, ractl,
&put_batch); &put_batch);
if (added < 0) if (added < 0)
return added; return added;
@ -211,7 +212,8 @@ static void netfs_issue_read(struct netfs_io_request *rreq,
* slicing up the region to be read according to available cache blocks and * slicing up the region to be read according to available cache blocks and
* network rsize. * network rsize.
*/ */
static void netfs_read_to_pagecache(struct netfs_io_request *rreq) static void netfs_read_to_pagecache(struct netfs_io_request *rreq,
struct readahead_control *ractl)
{ {
struct netfs_inode *ictx = netfs_inode(rreq->inode); struct netfs_inode *ictx = netfs_inode(rreq->inode);
unsigned long long start = rreq->start; unsigned long long start = rreq->start;
@ -262,9 +264,9 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
if (ret < 0) { if (ret < 0) {
subreq->error = ret; subreq->error = ret;
/* Not queued - release both refs. */ /* Not queued - release both refs. */
netfs_put_subrequest(subreq, false, netfs_put_subrequest(subreq,
netfs_sreq_trace_put_cancel); netfs_sreq_trace_put_cancel);
netfs_put_subrequest(subreq, false, netfs_put_subrequest(subreq,
netfs_sreq_trace_put_cancel); netfs_sreq_trace_put_cancel);
break; break;
} }
@ -291,14 +293,14 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
break; break;
issue: issue:
slice = netfs_prepare_read_iterator(subreq); slice = netfs_prepare_read_iterator(subreq, ractl);
if (slice < 0) { if (slice < 0) {
ret = slice; ret = slice;
subreq->error = ret; subreq->error = ret;
trace_netfs_sreq(subreq, netfs_sreq_trace_cancel); trace_netfs_sreq(subreq, netfs_sreq_trace_cancel);
/* Not queued - release both refs. */ /* Not queued - release both refs. */
netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel); netfs_put_subrequest(subreq, netfs_sreq_trace_put_cancel);
netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel); netfs_put_subrequest(subreq, netfs_sreq_trace_put_cancel);
break; break;
} }
size -= slice; size -= slice;
@ -312,7 +314,7 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
if (unlikely(size > 0)) { if (unlikely(size > 0)) {
smp_wmb(); /* Write lists before ALL_QUEUED. */ smp_wmb(); /* Write lists before ALL_QUEUED. */
set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags); set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
netfs_wake_read_collector(rreq); netfs_wake_collector(rreq);
} }
/* Defer error return as we may need to wait for outstanding I/O. */ /* Defer error return as we may need to wait for outstanding I/O. */
@ -359,18 +361,15 @@ void netfs_readahead(struct readahead_control *ractl)
netfs_rreq_expand(rreq, ractl); netfs_rreq_expand(rreq, ractl);
rreq->ractl = ractl;
rreq->submitted = rreq->start; rreq->submitted = rreq->start;
if (rolling_buffer_init(&rreq->buffer, rreq->debug_id, ITER_DEST) < 0) if (rolling_buffer_init(&rreq->buffer, rreq->debug_id, ITER_DEST) < 0)
goto cleanup_free; goto cleanup_free;
netfs_read_to_pagecache(rreq); netfs_read_to_pagecache(rreq, ractl);
netfs_put_request(rreq, true, netfs_rreq_trace_put_return); return netfs_put_request(rreq, netfs_rreq_trace_put_return);
return;
cleanup_free: cleanup_free:
netfs_put_request(rreq, false, netfs_rreq_trace_put_failed); return netfs_put_request(rreq, netfs_rreq_trace_put_failed);
return;
} }
EXPORT_SYMBOL(netfs_readahead); EXPORT_SYMBOL(netfs_readahead);
@ -389,7 +388,6 @@ static int netfs_create_singular_buffer(struct netfs_io_request *rreq, struct fo
if (added < 0) if (added < 0)
return added; return added;
rreq->submitted = rreq->start + added; rreq->submitted = rreq->start + added;
rreq->ractl = (struct readahead_control *)1UL;
return 0; return 0;
} }
@ -459,7 +457,7 @@ static int netfs_read_gaps(struct file *file, struct folio *folio)
iov_iter_bvec(&rreq->buffer.iter, ITER_DEST, bvec, i, rreq->len); iov_iter_bvec(&rreq->buffer.iter, ITER_DEST, bvec, i, rreq->len);
rreq->submitted = rreq->start + flen; rreq->submitted = rreq->start + flen;
netfs_read_to_pagecache(rreq); netfs_read_to_pagecache(rreq, NULL);
if (sink) if (sink)
folio_put(sink); folio_put(sink);
@ -470,11 +468,11 @@ static int netfs_read_gaps(struct file *file, struct folio *folio)
folio_mark_uptodate(folio); folio_mark_uptodate(folio);
} }
folio_unlock(folio); folio_unlock(folio);
netfs_put_request(rreq, false, netfs_rreq_trace_put_return); netfs_put_request(rreq, netfs_rreq_trace_put_return);
return ret < 0 ? ret : 0; return ret < 0 ? ret : 0;
discard: discard:
netfs_put_request(rreq, false, netfs_rreq_trace_put_discard); netfs_put_request(rreq, netfs_rreq_trace_put_discard);
alloc_error: alloc_error:
folio_unlock(folio); folio_unlock(folio);
return ret; return ret;
@ -528,13 +526,13 @@ int netfs_read_folio(struct file *file, struct folio *folio)
if (ret < 0) if (ret < 0)
goto discard; goto discard;
netfs_read_to_pagecache(rreq); netfs_read_to_pagecache(rreq, NULL);
ret = netfs_wait_for_read(rreq); ret = netfs_wait_for_read(rreq);
netfs_put_request(rreq, false, netfs_rreq_trace_put_return); netfs_put_request(rreq, netfs_rreq_trace_put_return);
return ret < 0 ? ret : 0; return ret < 0 ? ret : 0;
discard: discard:
netfs_put_request(rreq, false, netfs_rreq_trace_put_discard); netfs_put_request(rreq, netfs_rreq_trace_put_discard);
alloc_error: alloc_error:
folio_unlock(folio); folio_unlock(folio);
return ret; return ret;
@ -685,11 +683,11 @@ retry:
if (ret < 0) if (ret < 0)
goto error_put; goto error_put;
netfs_read_to_pagecache(rreq); netfs_read_to_pagecache(rreq, NULL);
ret = netfs_wait_for_read(rreq); ret = netfs_wait_for_read(rreq);
if (ret < 0) if (ret < 0)
goto error; goto error;
netfs_put_request(rreq, false, netfs_rreq_trace_put_return); netfs_put_request(rreq, netfs_rreq_trace_put_return);
have_folio: have_folio:
ret = folio_wait_private_2_killable(folio); ret = folio_wait_private_2_killable(folio);
@ -701,7 +699,7 @@ have_folio_no_wait:
return 0; return 0;
error_put: error_put:
netfs_put_request(rreq, false, netfs_rreq_trace_put_failed); netfs_put_request(rreq, netfs_rreq_trace_put_failed);
error: error:
if (folio) { if (folio) {
folio_unlock(folio); folio_unlock(folio);
@ -750,13 +748,13 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
if (ret < 0) if (ret < 0)
goto error_put; goto error_put;
netfs_read_to_pagecache(rreq); netfs_read_to_pagecache(rreq, NULL);
ret = netfs_wait_for_read(rreq); ret = netfs_wait_for_read(rreq);
netfs_put_request(rreq, false, netfs_rreq_trace_put_return); netfs_put_request(rreq, netfs_rreq_trace_put_return);
return ret < 0 ? ret : 0; return ret < 0 ? ret : 0;
error_put: error_put:
netfs_put_request(rreq, false, netfs_rreq_trace_put_discard); netfs_put_request(rreq, netfs_rreq_trace_put_discard);
error: error:
_leave(" = %d", ret); _leave(" = %d", ret);
return ret; return ret;


@@ -115,8 +115,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 	size_t max_chunk = mapping_max_folio_size(mapping);
 	bool maybe_trouble = false;

-	if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) ||
-		     iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC))
+	if (unlikely(iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC))
 	    ) {
 		wbc_attach_fdatawrite_inode(&wbc, mapping->host);
@@ -386,7 +385,7 @@ out:
 		wbc_detach_inode(&wbc);
 		if (ret2 == -EIOCBQUEUED)
 			return ret2;
-		if (ret == 0)
+		if (ret == 0 && ret2 < 0)
 			ret = ret2;
 	}


@ -85,7 +85,7 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
if (rreq->netfs_ops->prepare_read) { if (rreq->netfs_ops->prepare_read) {
ret = rreq->netfs_ops->prepare_read(subreq); ret = rreq->netfs_ops->prepare_read(subreq);
if (ret < 0) { if (ret < 0) {
netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel); netfs_put_subrequest(subreq, netfs_sreq_trace_put_cancel);
break; break;
} }
} }
@ -103,19 +103,16 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
rreq->netfs_ops->issue_read(subreq); rreq->netfs_ops->issue_read(subreq);
if (test_bit(NETFS_RREQ_PAUSE, &rreq->flags)) if (test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
netfs_wait_for_pause(rreq); netfs_wait_for_paused_read(rreq);
if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) if (test_bit(NETFS_RREQ_FAILED, &rreq->flags))
break; break;
if (test_bit(NETFS_RREQ_BLOCKED, &rreq->flags) &&
test_bit(NETFS_RREQ_NONBLOCK, &rreq->flags))
break;
cond_resched(); cond_resched();
} while (size > 0); } while (size > 0);
if (unlikely(size > 0)) { if (unlikely(size > 0)) {
smp_wmb(); /* Write lists before ALL_QUEUED. */ smp_wmb(); /* Write lists before ALL_QUEUED. */
set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags); set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
netfs_wake_read_collector(rreq); netfs_wake_collector(rreq);
} }
return ret; return ret;
@ -144,7 +141,7 @@ static ssize_t netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
ret = netfs_dispatch_unbuffered_reads(rreq); ret = netfs_dispatch_unbuffered_reads(rreq);
if (!rreq->submitted) { if (!rreq->submitted) {
netfs_put_request(rreq, false, netfs_rreq_trace_put_no_submit); netfs_put_request(rreq, netfs_rreq_trace_put_no_submit);
inode_dio_end(rreq->inode); inode_dio_end(rreq->inode);
ret = 0; ret = 0;
goto out; goto out;
@ -188,7 +185,8 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i
rreq = netfs_alloc_request(iocb->ki_filp->f_mapping, iocb->ki_filp, rreq = netfs_alloc_request(iocb->ki_filp->f_mapping, iocb->ki_filp,
iocb->ki_pos, orig_count, iocb->ki_pos, orig_count,
NETFS_DIO_READ); iocb->ki_flags & IOCB_DIRECT ?
NETFS_DIO_READ : NETFS_UNBUFFERED_READ);
if (IS_ERR(rreq)) if (IS_ERR(rreq))
return PTR_ERR(rreq); return PTR_ERR(rreq);
@ -236,7 +234,7 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i
} }
out: out:
netfs_put_request(rreq, false, netfs_rreq_trace_put_return); netfs_put_request(rreq, netfs_rreq_trace_put_return);
if (ret > 0) if (ret > 0)
orig_count -= ret; orig_count -= ret;
return ret; return ret;


@ -87,6 +87,8 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
} }
__set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags); __set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags);
if (async)
__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);
/* Copy the data into the bounce buffer and encrypt it. */ /* Copy the data into the bounce buffer and encrypt it. */
// TODO // TODO
@ -105,19 +107,15 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
if (!async) { if (!async) {
trace_netfs_rreq(wreq, netfs_rreq_trace_wait_ip); trace_netfs_rreq(wreq, netfs_rreq_trace_wait_ip);
wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS, ret = netfs_wait_for_write(wreq);
TASK_UNINTERRUPTIBLE); if (ret > 0)
ret = wreq->error;
if (ret == 0) {
ret = wreq->transferred;
iocb->ki_pos += ret; iocb->ki_pos += ret;
}
} else { } else {
ret = -EIOCBQUEUED; ret = -EIOCBQUEUED;
} }
out: out:
netfs_put_request(wreq, false, netfs_rreq_trace_put_return); netfs_put_request(wreq, netfs_rreq_trace_put_return);
return ret; return ret;
} }
EXPORT_SYMBOL(netfs_unbuffered_write_iter_locked); EXPORT_SYMBOL(netfs_unbuffered_write_iter_locked);


@ -192,8 +192,7 @@ EXPORT_SYMBOL(__fscache_clear_page_bits);
/* /*
* Deal with the completion of writing the data to the cache. * Deal with the completion of writing the data to the cache.
*/ */
static void fscache_wreq_done(void *priv, ssize_t transferred_or_error, static void fscache_wreq_done(void *priv, ssize_t transferred_or_error)
bool was_async)
{ {
struct fscache_write_request *wreq = priv; struct fscache_write_request *wreq = priv;
@ -202,8 +201,7 @@ static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
wreq->set_bits); wreq->set_bits);
if (wreq->term_func) if (wreq->term_func)
wreq->term_func(wreq->term_func_priv, transferred_or_error, wreq->term_func(wreq->term_func_priv, transferred_or_error);
was_async);
fscache_end_operation(&wreq->cache_resources); fscache_end_operation(&wreq->cache_resources);
kfree(wreq); kfree(wreq);
} }
@ -255,14 +253,14 @@ void __fscache_write_to_cache(struct fscache_cookie *cookie,
return; return;
abandon_end: abandon_end:
return fscache_wreq_done(wreq, ret, false); return fscache_wreq_done(wreq, ret);
abandon_free: abandon_free:
kfree(wreq); kfree(wreq);
abandon: abandon:
if (using_pgpriv2) if (using_pgpriv2)
fscache_clear_page_bits(mapping, start, len, cond); fscache_clear_page_bits(mapping, start, len, cond);
if (term_func) if (term_func)
term_func(term_func_priv, ret, false); term_func(term_func_priv, ret);
} }
EXPORT_SYMBOL(__fscache_write_to_cache); EXPORT_SYMBOL(__fscache_write_to_cache);


@ -23,7 +23,7 @@
/* /*
* buffered_read.c * buffered_read.c
*/ */
void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async); void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error);
int netfs_prefetch_for_write(struct file *file, struct folio *folio, int netfs_prefetch_for_write(struct file *file, struct folio *folio,
size_t offset, size_t len); size_t offset, size_t len);
@ -62,6 +62,14 @@ static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq, struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq,
enum netfs_folioq_trace trace); enum netfs_folioq_trace trace);
void netfs_reset_iter(struct netfs_io_subrequest *subreq); void netfs_reset_iter(struct netfs_io_subrequest *subreq);
void netfs_wake_collector(struct netfs_io_request *rreq);
void netfs_subreq_clear_in_progress(struct netfs_io_subrequest *subreq);
void netfs_wait_for_in_progress_stream(struct netfs_io_request *rreq,
struct netfs_io_stream *stream);
ssize_t netfs_wait_for_read(struct netfs_io_request *rreq);
ssize_t netfs_wait_for_write(struct netfs_io_request *rreq);
void netfs_wait_for_paused_read(struct netfs_io_request *rreq);
void netfs_wait_for_paused_write(struct netfs_io_request *rreq);
/* /*
* objects.c * objects.c
@ -71,9 +79,8 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
loff_t start, size_t len, loff_t start, size_t len,
enum netfs_io_origin origin); enum netfs_io_origin origin);
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what); void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async); void netfs_clear_subrequests(struct netfs_io_request *rreq);
void netfs_put_request(struct netfs_io_request *rreq, bool was_async, void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
enum netfs_rreq_ref_trace what);
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq); struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);
static inline void netfs_see_request(struct netfs_io_request *rreq, static inline void netfs_see_request(struct netfs_io_request *rreq,
@ -92,11 +99,9 @@ static inline void netfs_see_subrequest(struct netfs_io_subrequest *subreq,
/* /*
* read_collect.c * read_collect.c
*/ */
bool netfs_read_collection(struct netfs_io_request *rreq);
void netfs_read_collection_worker(struct work_struct *work); void netfs_read_collection_worker(struct work_struct *work);
void netfs_wake_read_collector(struct netfs_io_request *rreq); void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error);
void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async);
ssize_t netfs_wait_for_read(struct netfs_io_request *rreq);
void netfs_wait_for_pause(struct netfs_io_request *rreq);
/* /*
* read_pgpriv2.c * read_pgpriv2.c
@ -176,8 +181,8 @@ static inline void netfs_stat_d(atomic_t *stat)
* write_collect.c * write_collect.c
*/ */
int netfs_folio_written_back(struct folio *folio); int netfs_folio_written_back(struct folio *folio);
bool netfs_write_collection(struct netfs_io_request *wreq);
void netfs_write_collection_worker(struct work_struct *work); void netfs_write_collection_worker(struct work_struct *work);
void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async);
/* /*
* write_issue.c * write_issue.c
@ -198,8 +203,8 @@ struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len
int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc, int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
struct folio *folio, size_t copied, bool to_page_end, struct folio *folio, size_t copied, bool to_page_end,
struct folio **writethrough_cache); struct folio **writethrough_cache);
int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc, ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
struct folio *writethrough_cache); struct folio *writethrough_cache);
int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len); int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);
/* /*
@ -254,6 +259,21 @@ static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
netfs_group->free(netfs_group); netfs_group->free(netfs_group);
} }
/*
* Clear and wake up a NETFS_RREQ_* flag bit on a request.
*/
static inline void netfs_wake_rreq_flag(struct netfs_io_request *rreq,
unsigned int rreq_flag,
enum netfs_rreq_trace trace)
{
if (test_bit(rreq_flag, &rreq->flags)) {
trace_netfs_rreq(rreq, trace);
clear_bit_unlock(rreq_flag, &rreq->flags);
smp_mb__after_atomic(); /* Set flag before task state */
wake_up(&rreq->waitq);
}
}
/* /*
* fscache-cache.c * fscache-cache.c
*/ */
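
[Editorial note] netfs_wake_rreq_flag() centralises the clear-then-wake pattern on rreq->waitq, which is also the queue the new wait helpers sleep on; that is the substance of the wait/wake consistency fix. How the read collector pairs with it (lifted from the read_collect.c hunks below, shown only to illustrate the pattern):

	/* unpause anyone stalled on NETFS_RREQ_PAUSE once progress was made */
	if (notes & MADE_PROGRESS)
		netfs_wake_rreq_flag(rreq, NETFS_RREQ_PAUSE, netfs_rreq_trace_unpause);

	/* completion: clearing IN_PROGRESS also hands over the ref it pinned */
	netfs_wake_rreq_flag(rreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip);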


@@ -39,6 +39,7 @@ static const char *netfs_origins[nr__netfs_io_origin] = {
 	[NETFS_READ_GAPS] = "RG",
 	[NETFS_READ_SINGLE] = "R1",
 	[NETFS_READ_FOR_WRITE] = "RW",
+	[NETFS_UNBUFFERED_READ] = "UR",
 	[NETFS_DIO_READ] = "DR",
 	[NETFS_WRITEBACK] = "WB",
 	[NETFS_WRITEBACK_SINGLE] = "W1",


@ -313,3 +313,222 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp)
return true; return true;
} }
EXPORT_SYMBOL(netfs_release_folio); EXPORT_SYMBOL(netfs_release_folio);
/*
* Wake the collection work item.
*/
void netfs_wake_collector(struct netfs_io_request *rreq)
{
if (test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags) &&
!test_bit(NETFS_RREQ_RETRYING, &rreq->flags)) {
queue_work(system_unbound_wq, &rreq->work);
} else {
trace_netfs_rreq(rreq, netfs_rreq_trace_wake_queue);
wake_up(&rreq->waitq);
}
}
/*
* Mark a subrequest as no longer being in progress and, if need be, wake the
* collector.
*/
void netfs_subreq_clear_in_progress(struct netfs_io_subrequest *subreq)
{
struct netfs_io_request *rreq = subreq->rreq;
struct netfs_io_stream *stream = &rreq->io_streams[subreq->stream_nr];
clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
smp_mb__after_atomic(); /* Clear IN_PROGRESS before task state */
/* If we are at the head of the queue, wake up the collector. */
if (list_is_first(&subreq->rreq_link, &stream->subrequests) ||
test_bit(NETFS_RREQ_RETRYING, &rreq->flags))
netfs_wake_collector(rreq);
}
/*
* Wait for all outstanding I/O in a stream to quiesce.
*/
void netfs_wait_for_in_progress_stream(struct netfs_io_request *rreq,
struct netfs_io_stream *stream)
{
struct netfs_io_subrequest *subreq;
DEFINE_WAIT(myself);
list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
if (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags))
continue;
trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
for (;;) {
prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
if (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags))
break;
trace_netfs_sreq(subreq, netfs_sreq_trace_wait_for);
schedule();
trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue);
}
}
finish_wait(&rreq->waitq, &myself);
}
/*
* Perform collection in app thread if not offloaded to workqueue.
*/
static int netfs_collect_in_app(struct netfs_io_request *rreq,
bool (*collector)(struct netfs_io_request *rreq))
{
bool need_collect = false, inactive = true;
for (int i = 0; i < NR_IO_STREAMS; i++) {
struct netfs_io_subrequest *subreq;
struct netfs_io_stream *stream = &rreq->io_streams[i];
if (!stream->active)
continue;
inactive = false;
trace_netfs_collect_stream(rreq, stream);
subreq = list_first_entry_or_null(&stream->subrequests,
struct netfs_io_subrequest,
rreq_link);
if (subreq &&
(!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags) ||
test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) {
need_collect = true;
break;
}
}
if (!need_collect && !inactive)
return 0; /* Sleep */
__set_current_state(TASK_RUNNING);
if (collector(rreq)) {
/* Drop the ref from the NETFS_RREQ_IN_PROGRESS flag. */
netfs_put_request(rreq, netfs_rreq_trace_put_work_ip);
return 1; /* Done */
}
if (inactive) {
WARN(true, "Failed to collect inactive req R=%08x\n",
rreq->debug_id);
cond_resched();
}
return 2; /* Again */
}
/*
* Wait for a request to complete, successfully or otherwise.
*/
static ssize_t netfs_wait_for_request(struct netfs_io_request *rreq,
bool (*collector)(struct netfs_io_request *rreq))
{
DEFINE_WAIT(myself);
ssize_t ret;
for (;;) {
trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
if (!test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags)) {
switch (netfs_collect_in_app(rreq, collector)) {
case 0:
break;
case 1:
goto all_collected;
case 2:
continue;
}
}
if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
break;
schedule();
trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue);
}
all_collected:
finish_wait(&rreq->waitq, &myself);
ret = rreq->error;
if (ret == 0) {
ret = rreq->transferred;
switch (rreq->origin) {
case NETFS_DIO_READ:
case NETFS_DIO_WRITE:
case NETFS_READ_SINGLE:
case NETFS_UNBUFFERED_READ:
case NETFS_UNBUFFERED_WRITE:
break;
default:
if (rreq->submitted < rreq->len) {
trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
ret = -EIO;
}
break;
}
}
return ret;
}
ssize_t netfs_wait_for_read(struct netfs_io_request *rreq)
{
return netfs_wait_for_request(rreq, netfs_read_collection);
}
ssize_t netfs_wait_for_write(struct netfs_io_request *rreq)
{
return netfs_wait_for_request(rreq, netfs_write_collection);
}
/*
* Wait for a paused operation to unpause or complete in some manner.
*/
static void netfs_wait_for_pause(struct netfs_io_request *rreq,
bool (*collector)(struct netfs_io_request *rreq))
{
DEFINE_WAIT(myself);
trace_netfs_rreq(rreq, netfs_rreq_trace_wait_pause);
for (;;) {
trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
if (!test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags)) {
switch (netfs_collect_in_app(rreq, collector)) {
case 0:
break;
case 1:
goto all_collected;
case 2:
continue;
}
}
if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags) ||
!test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
break;
schedule();
trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue);
}
all_collected:
finish_wait(&rreq->waitq, &myself);
}
void netfs_wait_for_paused_read(struct netfs_io_request *rreq)
{
return netfs_wait_for_pause(rreq, netfs_read_collection);
}
void netfs_wait_for_paused_write(struct netfs_io_request *rreq)
{
return netfs_wait_for_pause(rreq, netfs_write_collection);
}
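
[Editorial note] Caller-side pattern for the new synchronous wait helpers (compare the unbuffered-write hunk above): the wait function now returns either an error or the number of bytes transferred, so callers no longer read wreq->error and wreq->transferred by hand. A trimmed sketch:

	ret = netfs_wait_for_write(wreq);
	if (ret > 0)
		iocb->ki_pos += ret;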


@ -10,6 +10,8 @@
#include <linux/delay.h> #include <linux/delay.h>
#include "internal.h" #include "internal.h"
static void netfs_free_request(struct work_struct *work);
/* /*
* Allocate an I/O request and initialise it. * Allocate an I/O request and initialise it.
*/ */
@ -34,6 +36,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
} }
memset(rreq, 0, kmem_cache_size(cache)); memset(rreq, 0, kmem_cache_size(cache));
INIT_WORK(&rreq->cleanup_work, netfs_free_request);
rreq->start = start; rreq->start = start;
rreq->len = len; rreq->len = len;
rreq->origin = origin; rreq->origin = origin;
@ -49,13 +52,14 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
INIT_LIST_HEAD(&rreq->io_streams[0].subrequests); INIT_LIST_HEAD(&rreq->io_streams[0].subrequests);
INIT_LIST_HEAD(&rreq->io_streams[1].subrequests); INIT_LIST_HEAD(&rreq->io_streams[1].subrequests);
init_waitqueue_head(&rreq->waitq); init_waitqueue_head(&rreq->waitq);
refcount_set(&rreq->ref, 1); refcount_set(&rreq->ref, 2);
if (origin == NETFS_READAHEAD || if (origin == NETFS_READAHEAD ||
origin == NETFS_READPAGE || origin == NETFS_READPAGE ||
origin == NETFS_READ_GAPS || origin == NETFS_READ_GAPS ||
origin == NETFS_READ_SINGLE || origin == NETFS_READ_SINGLE ||
origin == NETFS_READ_FOR_WRITE || origin == NETFS_READ_FOR_WRITE ||
origin == NETFS_UNBUFFERED_READ ||
origin == NETFS_DIO_READ) { origin == NETFS_DIO_READ) {
INIT_WORK(&rreq->work, netfs_read_collection_worker); INIT_WORK(&rreq->work, netfs_read_collection_worker);
rreq->io_streams[0].avail = true; rreq->io_streams[0].avail = true;
@ -64,8 +68,6 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
} }
__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags); __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
if (file && file->f_flags & O_NONBLOCK)
__set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
if (rreq->netfs_ops->init_request) { if (rreq->netfs_ops->init_request) {
ret = rreq->netfs_ops->init_request(rreq, file); ret = rreq->netfs_ops->init_request(rreq, file);
if (ret < 0) { if (ret < 0) {
@ -75,7 +77,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
} }
atomic_inc(&ctx->io_count); atomic_inc(&ctx->io_count);
trace_netfs_rreq_ref(rreq->debug_id, 1, netfs_rreq_trace_new); trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), netfs_rreq_trace_new);
netfs_proc_add_rreq(rreq); netfs_proc_add_rreq(rreq);
netfs_stat(&netfs_n_rh_rreq); netfs_stat(&netfs_n_rh_rreq);
return rreq; return rreq;
@ -89,7 +91,7 @@ void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace
trace_netfs_rreq_ref(rreq->debug_id, r + 1, what); trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
} }
void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async) void netfs_clear_subrequests(struct netfs_io_request *rreq)
{ {
struct netfs_io_subrequest *subreq; struct netfs_io_subrequest *subreq;
struct netfs_io_stream *stream; struct netfs_io_stream *stream;
@ -101,8 +103,7 @@ void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
subreq = list_first_entry(&stream->subrequests, subreq = list_first_entry(&stream->subrequests,
struct netfs_io_subrequest, rreq_link); struct netfs_io_subrequest, rreq_link);
list_del(&subreq->rreq_link); list_del(&subreq->rreq_link);
netfs_put_subrequest(subreq, was_async, netfs_put_subrequest(subreq, netfs_sreq_trace_put_clear);
netfs_sreq_trace_put_clear);
} }
} }
} }
@ -118,13 +119,19 @@ static void netfs_free_request_rcu(struct rcu_head *rcu)
static void netfs_free_request(struct work_struct *work) static void netfs_free_request(struct work_struct *work)
{ {
struct netfs_io_request *rreq = struct netfs_io_request *rreq =
container_of(work, struct netfs_io_request, work); container_of(work, struct netfs_io_request, cleanup_work);
struct netfs_inode *ictx = netfs_inode(rreq->inode); struct netfs_inode *ictx = netfs_inode(rreq->inode);
unsigned int i; unsigned int i;
trace_netfs_rreq(rreq, netfs_rreq_trace_free); trace_netfs_rreq(rreq, netfs_rreq_trace_free);
/* Cancel/flush the result collection worker. That does not carry a
* ref of its own, so we must wait for it somewhere.
*/
cancel_work_sync(&rreq->work);
netfs_proc_del_rreq(rreq); netfs_proc_del_rreq(rreq);
netfs_clear_subrequests(rreq, false); netfs_clear_subrequests(rreq);
if (rreq->netfs_ops->free_request) if (rreq->netfs_ops->free_request)
rreq->netfs_ops->free_request(rreq); rreq->netfs_ops->free_request(rreq);
if (rreq->cache_resources.ops) if (rreq->cache_resources.ops)
@ -145,8 +152,7 @@ static void netfs_free_request(struct work_struct *work)
call_rcu(&rreq->rcu, netfs_free_request_rcu); call_rcu(&rreq->rcu, netfs_free_request_rcu);
} }
void netfs_put_request(struct netfs_io_request *rreq, bool was_async, void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
enum netfs_rreq_ref_trace what)
{ {
unsigned int debug_id; unsigned int debug_id;
bool dead; bool dead;
@ -156,15 +162,8 @@ void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
debug_id = rreq->debug_id; debug_id = rreq->debug_id;
dead = __refcount_dec_and_test(&rreq->ref, &r); dead = __refcount_dec_and_test(&rreq->ref, &r);
trace_netfs_rreq_ref(debug_id, r - 1, what); trace_netfs_rreq_ref(debug_id, r - 1, what);
if (dead) { if (dead)
if (was_async) { WARN_ON(!queue_work(system_unbound_wq, &rreq->cleanup_work));
rreq->work.func = netfs_free_request;
if (!queue_work(system_unbound_wq, &rreq->work))
WARN_ON(1);
} else {
netfs_free_request(&rreq->work);
}
}
} }
} }
@ -206,8 +205,7 @@ void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
what); what);
} }
static void netfs_free_subrequest(struct netfs_io_subrequest *subreq, static void netfs_free_subrequest(struct netfs_io_subrequest *subreq)
bool was_async)
{ {
struct netfs_io_request *rreq = subreq->rreq; struct netfs_io_request *rreq = subreq->rreq;
@ -216,10 +214,10 @@ static void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
rreq->netfs_ops->free_subrequest(subreq); rreq->netfs_ops->free_subrequest(subreq);
mempool_free(subreq, rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool); mempool_free(subreq, rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool);
netfs_stat_d(&netfs_n_rh_sreq); netfs_stat_d(&netfs_n_rh_sreq);
netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq); netfs_put_request(rreq, netfs_rreq_trace_put_subreq);
} }
void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async, void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
enum netfs_sreq_ref_trace what) enum netfs_sreq_ref_trace what)
{ {
unsigned int debug_index = subreq->debug_index; unsigned int debug_index = subreq->debug_index;
@ -230,5 +228,5 @@ void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async,
dead = __refcount_dec_and_test(&subreq->ref, &r); dead = __refcount_dec_and_test(&subreq->ref, &r);
trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what); trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
if (dead) if (dead)
netfs_free_subrequest(subreq, was_async); netfs_free_subrequest(subreq);
} }
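
[Editorial note] The lifecycle implied by the changes above, as a hedged sketch (the comments are interpretation, not code from the patch): a request now starts with two references, one for the caller and one pinned by NETFS_RREQ_IN_PROGRESS, and the final put defers freeing to cleanup_work so that freeing never runs from the collector's own work item:

	/* allocation: refcount 2 = caller + IN_PROGRESS flag */
	rreq = netfs_alloc_request(mapping, file, start, len, origin);

	/* whoever clears IN_PROGRESS (collector or waiter) inherits its ref
	 * and drops it with netfs_rreq_trace_put_work_ip */
	netfs_put_request(rreq, netfs_rreq_trace_put_work_ip);

	/* the caller's own ref; the last put queues cleanup_work */
	netfs_put_request(rreq, netfs_rreq_trace_put_return);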


@ -83,14 +83,12 @@ static void netfs_unlock_read_folio(struct netfs_io_request *rreq,
} }
just_unlock: just_unlock:
if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) { if (folio->index == rreq->no_unlock_folio &&
if (folio->index == rreq->no_unlock_folio && test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags)) {
test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags)) { _debug("no unlock");
_debug("no unlock"); } else {
} else { trace_netfs_folio(folio, netfs_folio_trace_read_unlock);
trace_netfs_folio(folio, netfs_folio_trace_read_unlock); folio_unlock(folio);
folio_unlock(folio);
}
} }
folioq_clear(folioq, slot); folioq_clear(folioq, slot);
@ -280,9 +278,13 @@ reassess:
stream->need_retry = true; stream->need_retry = true;
notes |= NEED_RETRY | MADE_PROGRESS; notes |= NEED_RETRY | MADE_PROGRESS;
break; break;
} else if (test_bit(NETFS_RREQ_SHORT_TRANSFER, &rreq->flags)) {
notes |= MADE_PROGRESS;
} else { } else {
if (!stream->failed) if (!stream->failed)
stream->transferred = stream->collected_to - rreq->start; stream->transferred += transferred;
if (front->transferred < front->len)
set_bit(NETFS_RREQ_SHORT_TRANSFER, &rreq->flags);
notes |= MADE_PROGRESS; notes |= MADE_PROGRESS;
} }
@ -297,7 +299,7 @@ reassess:
struct netfs_io_subrequest, rreq_link); struct netfs_io_subrequest, rreq_link);
stream->front = front; stream->front = front;
spin_unlock(&rreq->lock); spin_unlock(&rreq->lock);
netfs_put_subrequest(remove, false, netfs_put_subrequest(remove,
notes & ABANDON_SREQ ? notes & ABANDON_SREQ ?
netfs_sreq_trace_put_abandon : netfs_sreq_trace_put_abandon :
netfs_sreq_trace_put_done); netfs_sreq_trace_put_done);
@ -311,14 +313,8 @@ reassess:
if (notes & NEED_RETRY) if (notes & NEED_RETRY)
goto need_retry; goto need_retry;
if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &rreq->flags)) {
trace_netfs_rreq(rreq, netfs_rreq_trace_unpause);
clear_bit_unlock(NETFS_RREQ_PAUSE, &rreq->flags);
smp_mb__after_atomic(); /* Set PAUSE before task state */
wake_up(&rreq->waitq);
}
if (notes & MADE_PROGRESS) { if (notes & MADE_PROGRESS) {
netfs_wake_rreq_flag(rreq, NETFS_RREQ_PAUSE, netfs_rreq_trace_unpause);
//cond_resched(); //cond_resched();
goto reassess; goto reassess;
} }
@ -342,24 +338,10 @@ need_retry:
*/ */
static void netfs_rreq_assess_dio(struct netfs_io_request *rreq) static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
{ {
struct netfs_io_subrequest *subreq;
struct netfs_io_stream *stream = &rreq->io_streams[0];
unsigned int i; unsigned int i;
/* Collect unbuffered reads and direct reads, adding up the transfer if (rreq->origin == NETFS_UNBUFFERED_READ ||
* sizes until we find the first short or failed subrequest. rreq->origin == NETFS_DIO_READ) {
*/
list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
rreq->transferred += subreq->transferred;
if (subreq->transferred < subreq->len ||
test_bit(NETFS_SREQ_FAILED, &subreq->flags)) {
rreq->error = subreq->error;
break;
}
}
if (rreq->origin == NETFS_DIO_READ) {
for (i = 0; i < rreq->direct_bv_count; i++) { for (i = 0; i < rreq->direct_bv_count; i++) {
flush_dcache_page(rreq->direct_bv[i].bv_page); flush_dcache_page(rreq->direct_bv[i].bv_page);
// TODO: cifs marks pages in the destination buffer // TODO: cifs marks pages in the destination buffer
@ -377,7 +359,8 @@ static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
} }
if (rreq->netfs_ops->done) if (rreq->netfs_ops->done)
rreq->netfs_ops->done(rreq); rreq->netfs_ops->done(rreq);
if (rreq->origin == NETFS_DIO_READ) if (rreq->origin == NETFS_UNBUFFERED_READ ||
rreq->origin == NETFS_DIO_READ)
inode_dio_end(rreq->inode); inode_dio_end(rreq->inode);
} }
@ -410,7 +393,7 @@ static void netfs_rreq_assess_single(struct netfs_io_request *rreq)
* Note that we're in normal kernel thread context at this point, possibly * Note that we're in normal kernel thread context at this point, possibly
* running on a workqueue. * running on a workqueue.
*/ */
static void netfs_read_collection(struct netfs_io_request *rreq) bool netfs_read_collection(struct netfs_io_request *rreq)
{ {
struct netfs_io_stream *stream = &rreq->io_streams[0]; struct netfs_io_stream *stream = &rreq->io_streams[0];
@ -420,11 +403,11 @@ static void netfs_read_collection(struct netfs_io_request *rreq)
* queue is empty. * queue is empty.
*/ */
if (!test_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags)) if (!test_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags))
return; return false;
smp_rmb(); /* Read ALL_QUEUED before subreq lists. */ smp_rmb(); /* Read ALL_QUEUED before subreq lists. */
if (!list_empty(&stream->subrequests)) if (!list_empty(&stream->subrequests))
return; return false;
/* Okay, declare that all I/O is complete. */ /* Okay, declare that all I/O is complete. */
rreq->transferred = stream->transferred; rreq->transferred = stream->transferred;
@ -433,6 +416,7 @@ static void netfs_read_collection(struct netfs_io_request *rreq)
//netfs_rreq_is_still_valid(rreq); //netfs_rreq_is_still_valid(rreq);
switch (rreq->origin) { switch (rreq->origin) {
case NETFS_UNBUFFERED_READ:
case NETFS_DIO_READ: case NETFS_DIO_READ:
case NETFS_READ_GAPS: case NETFS_READ_GAPS:
netfs_rreq_assess_dio(rreq); netfs_rreq_assess_dio(rreq);
@ -445,14 +429,15 @@ static void netfs_read_collection(struct netfs_io_request *rreq)
} }
task_io_account_read(rreq->transferred); task_io_account_read(rreq->transferred);
trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip); netfs_wake_rreq_flag(rreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip);
clear_and_wake_up_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags); /* As we cleared NETFS_RREQ_IN_PROGRESS, we acquired its ref. */
trace_netfs_rreq(rreq, netfs_rreq_trace_done); trace_netfs_rreq(rreq, netfs_rreq_trace_done);
netfs_clear_subrequests(rreq, false); netfs_clear_subrequests(rreq);
netfs_unlock_abandoned_read_pages(rreq); netfs_unlock_abandoned_read_pages(rreq);
if (unlikely(rreq->copy_to_cache)) if (unlikely(rreq->copy_to_cache))
netfs_pgpriv2_end_copy_to_cache(rreq); netfs_pgpriv2_end_copy_to_cache(rreq);
return true;
} }
void netfs_read_collection_worker(struct work_struct *work) void netfs_read_collection_worker(struct work_struct *work)
@ -460,26 +445,12 @@ void netfs_read_collection_worker(struct work_struct *work)
struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work); struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work);
netfs_see_request(rreq, netfs_rreq_trace_see_work); netfs_see_request(rreq, netfs_rreq_trace_see_work);
if (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags)) if (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags)) {
netfs_read_collection(rreq); if (netfs_read_collection(rreq))
netfs_put_request(rreq, false, netfs_rreq_trace_put_work); /* Drop the ref from the IN_PROGRESS flag. */
} netfs_put_request(rreq, netfs_rreq_trace_put_work_ip);
else
/* netfs_see_request(rreq, netfs_rreq_trace_see_work_complete);
* Wake the collection work item.
*/
void netfs_wake_read_collector(struct netfs_io_request *rreq)
{
if (test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags) &&
!test_bit(NETFS_RREQ_RETRYING, &rreq->flags)) {
if (!work_pending(&rreq->work)) {
netfs_get_request(rreq, netfs_rreq_trace_get_work);
if (!queue_work(system_unbound_wq, &rreq->work))
netfs_put_request(rreq, true, netfs_rreq_trace_put_work_nq);
}
} else {
trace_netfs_rreq(rreq, netfs_rreq_trace_wake_queue);
wake_up(&rreq->waitq);
} }
} }
@ -511,7 +482,7 @@ void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq)
list_is_first(&subreq->rreq_link, &stream->subrequests) list_is_first(&subreq->rreq_link, &stream->subrequests)
) { ) {
__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags); __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
netfs_wake_read_collector(rreq); netfs_wake_collector(rreq);
} }
} }
EXPORT_SYMBOL(netfs_read_subreq_progress); EXPORT_SYMBOL(netfs_read_subreq_progress);
@ -535,7 +506,6 @@ EXPORT_SYMBOL(netfs_read_subreq_progress);
void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq) void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq)
{ {
struct netfs_io_request *rreq = subreq->rreq; struct netfs_io_request *rreq = subreq->rreq;
struct netfs_io_stream *stream = &rreq->io_streams[0];
switch (subreq->source) { switch (subreq->source) {
case NETFS_READ_FROM_CACHE: case NETFS_READ_FROM_CACHE:
@ -582,23 +552,15 @@ void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq)
} }
trace_netfs_sreq(subreq, netfs_sreq_trace_terminated); trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
netfs_subreq_clear_in_progress(subreq);
clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags); netfs_put_subrequest(subreq, netfs_sreq_trace_put_terminated);
smp_mb__after_atomic(); /* Clear IN_PROGRESS before task state */
/* If we are at the head of the queue, wake up the collector. */
if (list_is_first(&subreq->rreq_link, &stream->subrequests) ||
test_bit(NETFS_RREQ_RETRYING, &rreq->flags))
netfs_wake_read_collector(rreq);
netfs_put_subrequest(subreq, true, netfs_sreq_trace_put_terminated);
} }
EXPORT_SYMBOL(netfs_read_subreq_terminated); EXPORT_SYMBOL(netfs_read_subreq_terminated);
/* /*
* Handle termination of a read from the cache. * Handle termination of a read from the cache.
*/ */
void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async) void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error)
{ {
struct netfs_io_subrequest *subreq = priv; struct netfs_io_subrequest *subreq = priv;
@ -613,94 +575,3 @@ void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool
} }
netfs_read_subreq_terminated(subreq); netfs_read_subreq_terminated(subreq);
} }
/*
* Wait for the read operation to complete, successfully or otherwise.
*/
ssize_t netfs_wait_for_read(struct netfs_io_request *rreq)
{
struct netfs_io_subrequest *subreq;
struct netfs_io_stream *stream = &rreq->io_streams[0];
DEFINE_WAIT(myself);
ssize_t ret;
for (;;) {
trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
subreq = list_first_entry_or_null(&stream->subrequests,
struct netfs_io_subrequest, rreq_link);
if (subreq &&
(!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags) ||
test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) {
__set_current_state(TASK_RUNNING);
netfs_read_collection(rreq);
continue;
}
if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
break;
schedule();
trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue);
}
finish_wait(&rreq->waitq, &myself);
ret = rreq->error;
if (ret == 0) {
ret = rreq->transferred;
switch (rreq->origin) {
case NETFS_DIO_READ:
case NETFS_READ_SINGLE:
ret = rreq->transferred;
break;
default:
if (rreq->submitted < rreq->len) {
trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
ret = -EIO;
}
break;
}
}
return ret;
}
/*
* Wait for a paused read operation to unpause or complete in some manner.
*/
void netfs_wait_for_pause(struct netfs_io_request *rreq)
{
struct netfs_io_subrequest *subreq;
struct netfs_io_stream *stream = &rreq->io_streams[0];
DEFINE_WAIT(myself);
trace_netfs_rreq(rreq, netfs_rreq_trace_wait_pause);
for (;;) {
trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
if (!test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags)) {
subreq = list_first_entry_or_null(&stream->subrequests,
struct netfs_io_subrequest, rreq_link);
if (subreq &&
(!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags) ||
test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) {
__set_current_state(TASK_RUNNING);
netfs_read_collection(rreq);
continue;
}
}
if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags) ||
!test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
break;
schedule();
trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue);
}
finish_wait(&rreq->waitq, &myself);
}
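
With this rework the filesystem's read-completion path no longer wakes the collector itself and no longer needs to know whether it ran in BH/IRQ context. A minimal, hypothetical sketch of a client completion handler under the new API (myfs_read_done and its parameters are illustrative, not part of this series):

    #include <linux/netfs.h>

    static void myfs_read_done(struct netfs_io_subrequest *subreq, ssize_t got)
    {
            if (got < 0) {
                    subreq->error = got;
            } else {
                    subreq->transferred += got;
                    __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
            }
            /* Hand the subrequest (and its ref) back; netfslib now does the
             * collector wake-up itself.
             */
            netfs_read_subreq_terminated(subreq);
    }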


@ -116,7 +116,7 @@ static struct netfs_io_request *netfs_pgpriv2_begin_copy_to_cache(
return creq; return creq;
cancel_put: cancel_put:
netfs_put_request(creq, false, netfs_rreq_trace_put_return); netfs_put_request(creq, netfs_rreq_trace_put_return);
cancel: cancel:
rreq->copy_to_cache = ERR_PTR(-ENOBUFS); rreq->copy_to_cache = ERR_PTR(-ENOBUFS);
clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags); clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);
@ -155,7 +155,7 @@ void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq)
smp_wmb(); /* Write lists before ALL_QUEUED. */ smp_wmb(); /* Write lists before ALL_QUEUED. */
set_bit(NETFS_RREQ_ALL_QUEUED, &creq->flags); set_bit(NETFS_RREQ_ALL_QUEUED, &creq->flags);
netfs_put_request(creq, false, netfs_rreq_trace_put_return); netfs_put_request(creq, netfs_rreq_trace_put_return);
creq->copy_to_cache = NULL; creq->copy_to_cache = NULL;
} }


@ -173,7 +173,7 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
&stream->subrequests, rreq_link) { &stream->subrequests, rreq_link) {
trace_netfs_sreq(subreq, netfs_sreq_trace_superfluous); trace_netfs_sreq(subreq, netfs_sreq_trace_superfluous);
list_del(&subreq->rreq_link); list_del(&subreq->rreq_link);
netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done); netfs_put_subrequest(subreq, netfs_sreq_trace_put_done);
if (subreq == to) if (subreq == to)
break; break;
} }
@ -257,35 +257,15 @@ abandon:
*/ */
void netfs_retry_reads(struct netfs_io_request *rreq) void netfs_retry_reads(struct netfs_io_request *rreq)
{ {
struct netfs_io_subrequest *subreq;
struct netfs_io_stream *stream = &rreq->io_streams[0]; struct netfs_io_stream *stream = &rreq->io_streams[0];
DEFINE_WAIT(myself);
netfs_stat(&netfs_n_rh_retry_read_req); netfs_stat(&netfs_n_rh_retry_read_req);
set_bit(NETFS_RREQ_RETRYING, &rreq->flags);
/* Wait for all outstanding I/O to quiesce before performing retries as /* Wait for all outstanding I/O to quiesce before performing retries as
* we may need to renegotiate the I/O sizes. * we may need to renegotiate the I/O sizes.
*/ */
list_for_each_entry(subreq, &stream->subrequests, rreq_link) { set_bit(NETFS_RREQ_RETRYING, &rreq->flags);
if (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags)) netfs_wait_for_in_progress_stream(rreq, stream);
continue;
trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
for (;;) {
prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
if (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags))
break;
trace_netfs_sreq(subreq, netfs_sreq_trace_wait_for);
schedule();
trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue);
}
finish_wait(&rreq->waitq, &myself);
}
clear_bit(NETFS_RREQ_RETRYING, &rreq->flags); clear_bit(NETFS_RREQ_RETRYING, &rreq->flags);
trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit); trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);


@ -142,7 +142,7 @@ static int netfs_single_dispatch_read(struct netfs_io_request *rreq)
set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags); set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
return ret; return ret;
cancel: cancel:
netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel); netfs_put_subrequest(subreq, netfs_sreq_trace_put_cancel);
return ret; return ret;
} }
@ -185,11 +185,11 @@ ssize_t netfs_read_single(struct inode *inode, struct file *file, struct iov_ite
netfs_single_dispatch_read(rreq); netfs_single_dispatch_read(rreq);
ret = netfs_wait_for_read(rreq); ret = netfs_wait_for_read(rreq);
netfs_put_request(rreq, true, netfs_rreq_trace_put_return); netfs_put_request(rreq, netfs_rreq_trace_put_return);
return ret; return ret;
cleanup_free: cleanup_free:
netfs_put_request(rreq, false, netfs_rreq_trace_put_failed); netfs_put_request(rreq, netfs_rreq_trace_put_failed);
return ret; return ret;
} }
EXPORT_SYMBOL(netfs_read_single); EXPORT_SYMBOL(netfs_read_single);
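
netfs_read_single() reads a monolithic object in one go and, via netfs_wait_for_read(), returns the byte count or an error. A hedged sketch of a caller reading into a kernel buffer (the helper name and sizes are illustrative):

    #include <linux/netfs.h>
    #include <linux/uio.h>

    static ssize_t myfs_read_blob(struct inode *inode, struct file *file,
                                  void *buf, size_t size)
    {
            struct kvec kv = { .iov_base = buf, .iov_len = size };
            struct iov_iter iter;

            iov_iter_kvec(&iter, ITER_DEST, &kv, 1, size);
            return netfs_read_single(inode, file, &iter);
    }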


@ -280,7 +280,7 @@ reassess_streams:
struct netfs_io_subrequest, rreq_link); struct netfs_io_subrequest, rreq_link);
stream->front = front; stream->front = front;
spin_unlock(&wreq->lock); spin_unlock(&wreq->lock);
netfs_put_subrequest(remove, false, netfs_put_subrequest(remove,
notes & SAW_FAILURE ? notes & SAW_FAILURE ?
netfs_sreq_trace_put_cancel : netfs_sreq_trace_put_cancel :
netfs_sreq_trace_put_done); netfs_sreq_trace_put_done);
@ -321,18 +321,14 @@ reassess_streams:
if (notes & NEED_RETRY) if (notes & NEED_RETRY)
goto need_retry; goto need_retry;
if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
trace_netfs_rreq(wreq, netfs_rreq_trace_unpause);
clear_bit_unlock(NETFS_RREQ_PAUSE, &wreq->flags);
smp_mb__after_atomic(); /* Set PAUSE before task state */
wake_up(&wreq->waitq);
}
if (notes & NEED_REASSESS) { if (notes & MADE_PROGRESS) {
netfs_wake_rreq_flag(wreq, NETFS_RREQ_PAUSE, netfs_rreq_trace_unpause);
//cond_resched(); //cond_resched();
goto reassess_streams; goto reassess_streams;
} }
if (notes & MADE_PROGRESS) {
if (notes & NEED_REASSESS) {
//cond_resched(); //cond_resched();
goto reassess_streams; goto reassess_streams;
} }
@ -356,30 +352,21 @@ need_retry:
/* /*
* Perform the collection of subrequests, folios and encryption buffers. * Perform the collection of subrequests, folios and encryption buffers.
*/ */
void netfs_write_collection_worker(struct work_struct *work) bool netfs_write_collection(struct netfs_io_request *wreq)
{ {
struct netfs_io_request *wreq = container_of(work, struct netfs_io_request, work);
struct netfs_inode *ictx = netfs_inode(wreq->inode); struct netfs_inode *ictx = netfs_inode(wreq->inode);
size_t transferred; size_t transferred;
int s; int s;
_enter("R=%x", wreq->debug_id); _enter("R=%x", wreq->debug_id);
netfs_see_request(wreq, netfs_rreq_trace_see_work);
if (!test_bit(NETFS_RREQ_IN_PROGRESS, &wreq->flags)) {
netfs_put_request(wreq, false, netfs_rreq_trace_put_work);
return;
}
netfs_collect_write_results(wreq); netfs_collect_write_results(wreq);
/* We're done when the app thread has finished posting subreqs and all /* We're done when the app thread has finished posting subreqs and all
* the queues in all the streams are empty. * the queues in all the streams are empty.
*/ */
if (!test_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags)) { if (!test_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags))
netfs_put_request(wreq, false, netfs_rreq_trace_put_work); return false;
return;
}
smp_rmb(); /* Read ALL_QUEUED before lists. */ smp_rmb(); /* Read ALL_QUEUED before lists. */
transferred = LONG_MAX; transferred = LONG_MAX;
@ -387,10 +374,8 @@ void netfs_write_collection_worker(struct work_struct *work)
struct netfs_io_stream *stream = &wreq->io_streams[s]; struct netfs_io_stream *stream = &wreq->io_streams[s];
if (!stream->active) if (!stream->active)
continue; continue;
if (!list_empty(&stream->subrequests)) { if (!list_empty(&stream->subrequests))
netfs_put_request(wreq, false, netfs_rreq_trace_put_work); return false;
return;
}
if (stream->transferred < transferred) if (stream->transferred < transferred)
transferred = stream->transferred; transferred = stream->transferred;
} }
@ -428,8 +413,8 @@ void netfs_write_collection_worker(struct work_struct *work)
inode_dio_end(wreq->inode); inode_dio_end(wreq->inode);
_debug("finished"); _debug("finished");
trace_netfs_rreq(wreq, netfs_rreq_trace_wake_ip); netfs_wake_rreq_flag(wreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip);
clear_and_wake_up_bit(NETFS_RREQ_IN_PROGRESS, &wreq->flags); /* As we cleared NETFS_RREQ_IN_PROGRESS, we acquired its ref. */
if (wreq->iocb) { if (wreq->iocb) {
size_t written = min(wreq->transferred, wreq->len); size_t written = min(wreq->transferred, wreq->len);
@ -440,19 +425,21 @@ void netfs_write_collection_worker(struct work_struct *work)
wreq->iocb = VFS_PTR_POISON; wreq->iocb = VFS_PTR_POISON;
} }
netfs_clear_subrequests(wreq, false); netfs_clear_subrequests(wreq);
netfs_put_request(wreq, false, netfs_rreq_trace_put_work_complete); return true;
} }
/* void netfs_write_collection_worker(struct work_struct *work)
* Wake the collection work item.
*/
void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async)
{ {
if (!work_pending(&wreq->work)) { struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work);
netfs_get_request(wreq, netfs_rreq_trace_get_work);
if (!queue_work(system_unbound_wq, &wreq->work)) netfs_see_request(rreq, netfs_rreq_trace_see_work);
netfs_put_request(wreq, was_async, netfs_rreq_trace_put_work_nq); if (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags)) {
if (netfs_write_collection(rreq))
/* Drop the ref from the IN_PROGRESS flag. */
netfs_put_request(rreq, netfs_rreq_trace_put_work_ip);
else
netfs_see_request(rreq, netfs_rreq_trace_see_work_complete);
} }
} }
@ -460,7 +447,6 @@ void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async)
* netfs_write_subrequest_terminated - Note the termination of a write operation. * netfs_write_subrequest_terminated - Note the termination of a write operation.
* @_op: The I/O subrequest that has terminated. * @_op: The I/O subrequest that has terminated.
* @transferred_or_error: The amount of data transferred or an error code. * @transferred_or_error: The amount of data transferred or an error code.
* @was_async: The termination was asynchronous
* *
* This tells the library that a contributory write I/O operation has * This tells the library that a contributory write I/O operation has
* terminated, one way or another, and that it should collect the results. * terminated, one way or another, and that it should collect the results.
@ -470,21 +456,16 @@ void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async)
* negative error code. The library will look after reissuing I/O operations * negative error code. The library will look after reissuing I/O operations
* as appropriate and writing downloaded data to the cache. * as appropriate and writing downloaded data to the cache.
* *
* If @was_async is true, the caller might be running in softirq or interrupt
* context and we can't sleep.
*
* When this is called, ownership of the subrequest is transferred back to the * When this is called, ownership of the subrequest is transferred back to the
* library, along with a ref. * library, along with a ref.
* *
* Note that %_op is a void* so that the function can be passed to * Note that %_op is a void* so that the function can be passed to
* kiocb::term_func without the need for a casting wrapper. * kiocb::term_func without the need for a casting wrapper.
*/ */
void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error, void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error)
bool was_async)
{ {
struct netfs_io_subrequest *subreq = _op; struct netfs_io_subrequest *subreq = _op;
struct netfs_io_request *wreq = subreq->rreq; struct netfs_io_request *wreq = subreq->rreq;
struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr];
_enter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error); _enter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);
@ -495,8 +476,6 @@ void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
case NETFS_WRITE_TO_CACHE: case NETFS_WRITE_TO_CACHE:
netfs_stat(&netfs_n_wh_write_done); netfs_stat(&netfs_n_wh_write_done);
break; break;
case NETFS_INVALID_WRITE:
break;
default: default:
BUG(); BUG();
} }
@ -536,15 +515,7 @@ void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
} }
trace_netfs_sreq(subreq, netfs_sreq_trace_terminated); trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
netfs_subreq_clear_in_progress(subreq);
clear_and_wake_up_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags); netfs_put_subrequest(subreq, netfs_sreq_trace_put_terminated);
/* If we are at the head of the queue, wake up the collector,
* transferring a ref to it if we were the ones to do so.
*/
if (list_is_first(&subreq->rreq_link, &stream->subrequests))
netfs_wake_write_collector(wreq, was_async);
netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
} }
EXPORT_SYMBOL(netfs_write_subrequest_terminated); EXPORT_SYMBOL(netfs_write_subrequest_terminated);
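
With @was_async gone, a transport completion handler simply reports the outcome; whether collection then runs inline or on a workqueue is netfslib's concern. A hypothetical example of the call under the new two-argument form (myfs_write_done is illustrative):

    #include <linux/netfs.h>

    static void myfs_write_done(struct netfs_io_subrequest *subreq,
                                ssize_t bytes_or_error)
    {
            /* Positive: bytes accepted by the server; negative: an errno.
             * Ownership of the subrequest (plus a ref) returns to netfslib.
             */
            netfs_write_subrequest_terminated(subreq, bytes_or_error);
    }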


@ -134,7 +134,7 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
return wreq; return wreq;
nomem: nomem:
wreq->error = -ENOMEM; wreq->error = -ENOMEM;
netfs_put_request(wreq, false, netfs_rreq_trace_put_failed); netfs_put_request(wreq, netfs_rreq_trace_put_failed);
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
@ -233,7 +233,7 @@ static void netfs_do_issue_write(struct netfs_io_stream *stream,
_enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len); _enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);
if (test_bit(NETFS_SREQ_FAILED, &subreq->flags)) if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
return netfs_write_subrequest_terminated(subreq, subreq->error, false); return netfs_write_subrequest_terminated(subreq, subreq->error);
trace_netfs_sreq(subreq, netfs_sreq_trace_submit); trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
stream->issue_write(subreq); stream->issue_write(subreq);
@ -542,7 +542,7 @@ static void netfs_end_issue_write(struct netfs_io_request *wreq)
} }
if (needs_poke) if (needs_poke)
netfs_wake_write_collector(wreq, false); netfs_wake_collector(wreq);
} }
/* /*
@ -576,6 +576,7 @@ int netfs_writepages(struct address_space *mapping,
goto couldnt_start; goto couldnt_start;
} }
__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);
trace_netfs_write(wreq, netfs_write_trace_writeback); trace_netfs_write(wreq, netfs_write_trace_writeback);
netfs_stat(&netfs_n_wh_writepages); netfs_stat(&netfs_n_wh_writepages);
@ -599,8 +600,9 @@ int netfs_writepages(struct address_space *mapping,
netfs_end_issue_write(wreq); netfs_end_issue_write(wreq);
mutex_unlock(&ictx->wb_lock); mutex_unlock(&ictx->wb_lock);
netfs_wake_collector(wreq);
netfs_put_request(wreq, false, netfs_rreq_trace_put_return); netfs_put_request(wreq, netfs_rreq_trace_put_return);
_leave(" = %d", error); _leave(" = %d", error);
return error; return error;
@ -673,11 +675,11 @@ int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_c
/* /*
* End a write operation used when writing through the pagecache. * End a write operation used when writing through the pagecache.
*/ */
int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc, ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
struct folio *writethrough_cache) struct folio *writethrough_cache)
{ {
struct netfs_inode *ictx = netfs_inode(wreq->inode); struct netfs_inode *ictx = netfs_inode(wreq->inode);
int ret; ssize_t ret;
_enter("R=%x", wreq->debug_id); _enter("R=%x", wreq->debug_id);
@ -688,13 +690,11 @@ int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_contr
mutex_unlock(&ictx->wb_lock); mutex_unlock(&ictx->wb_lock);
if (wreq->iocb) { if (wreq->iocb)
ret = -EIOCBQUEUED; ret = -EIOCBQUEUED;
} else { else
wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS, TASK_UNINTERRUPTIBLE); ret = netfs_wait_for_write(wreq);
ret = wreq->error; netfs_put_request(wreq, netfs_rreq_trace_put_return);
}
netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
return ret; return ret;
} }
@ -722,10 +722,8 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
start += part; start += part;
len -= part; len -= part;
rolling_buffer_advance(&wreq->buffer, part); rolling_buffer_advance(&wreq->buffer, part);
if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) { if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags))
trace_netfs_rreq(wreq, netfs_rreq_trace_wait_pause); netfs_wait_for_paused_write(wreq);
wait_event(wreq->waitq, !test_bit(NETFS_RREQ_PAUSE, &wreq->flags));
}
if (test_bit(NETFS_RREQ_FAILED, &wreq->flags)) if (test_bit(NETFS_RREQ_FAILED, &wreq->flags))
break; break;
} }
@ -885,7 +883,8 @@ int netfs_writeback_single(struct address_space *mapping,
goto couldnt_start; goto couldnt_start;
} }
trace_netfs_write(wreq, netfs_write_trace_writeback); __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);
trace_netfs_write(wreq, netfs_write_trace_writeback_single);
netfs_stat(&netfs_n_wh_writepages); netfs_stat(&netfs_n_wh_writepages);
if (__test_and_set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags)) if (__test_and_set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))
@ -914,8 +913,9 @@ stop:
set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags); set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
mutex_unlock(&ictx->wb_lock); mutex_unlock(&ictx->wb_lock);
netfs_wake_collector(wreq);
netfs_put_request(wreq, false, netfs_rreq_trace_put_return); netfs_put_request(wreq, netfs_rreq_trace_put_return);
_leave(" = %d", ret); _leave(" = %d", ret);
return ret; return ret;


@ -39,9 +39,10 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
if (test_bit(NETFS_SREQ_FAILED, &subreq->flags)) if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
break; break;
if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) { if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
struct iov_iter source = subreq->io_iter; struct iov_iter source;
iov_iter_revert(&source, subreq->len - source.count); netfs_reset_iter(subreq);
source = subreq->io_iter;
netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
netfs_reissue_write(stream, subreq, &source); netfs_reissue_write(stream, subreq, &source);
} }
@ -131,7 +132,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
&stream->subrequests, rreq_link) { &stream->subrequests, rreq_link) {
trace_netfs_sreq(subreq, netfs_sreq_trace_discard); trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
list_del(&subreq->rreq_link); list_del(&subreq->rreq_link);
netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done); netfs_put_subrequest(subreq, netfs_sreq_trace_put_done);
if (subreq == to) if (subreq == to)
break; break;
} }
@ -199,7 +200,6 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
*/ */
void netfs_retry_writes(struct netfs_io_request *wreq) void netfs_retry_writes(struct netfs_io_request *wreq)
{ {
struct netfs_io_subrequest *subreq;
struct netfs_io_stream *stream; struct netfs_io_stream *stream;
int s; int s;
@ -208,16 +208,13 @@ void netfs_retry_writes(struct netfs_io_request *wreq)
/* Wait for all outstanding I/O to quiesce before performing retries as /* Wait for all outstanding I/O to quiesce before performing retries as
* we may need to renegotiate the I/O sizes. * we may need to renegotiate the I/O sizes.
*/ */
set_bit(NETFS_RREQ_RETRYING, &wreq->flags);
for (s = 0; s < NR_IO_STREAMS; s++) { for (s = 0; s < NR_IO_STREAMS; s++) {
stream = &wreq->io_streams[s]; stream = &wreq->io_streams[s];
if (!stream->active) if (stream->active)
continue; netfs_wait_for_in_progress_stream(wreq, stream);
list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS,
TASK_UNINTERRUPTIBLE);
}
} }
clear_bit(NETFS_RREQ_RETRYING, &wreq->flags);
// TODO: Enc: Fetch changed partial pages // TODO: Enc: Fetch changed partial pages
// TODO: Enc: Reencrypt content if needed. // TODO: Enc: Reencrypt content if needed.


@ -367,6 +367,7 @@ void nfs_netfs_read_completion(struct nfs_pgio_header *hdr)
sreq = netfs->sreq; sreq = netfs->sreq;
if (test_bit(NFS_IOHDR_EOF, &hdr->flags) && if (test_bit(NFS_IOHDR_EOF, &hdr->flags) &&
sreq->rreq->origin != NETFS_UNBUFFERED_READ &&
sreq->rreq->origin != NETFS_DIO_READ) sreq->rreq->origin != NETFS_DIO_READ)
__set_bit(NETFS_SREQ_CLEAR_TAIL, &sreq->flags); __set_bit(NETFS_SREQ_CLEAR_TAIL, &sreq->flags);


@ -151,8 +151,7 @@ extern bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 eof,
bool from_readdir); bool from_readdir);
extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset, extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
unsigned int bytes_written); unsigned int bytes_written);
void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result, void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result);
bool was_async);
extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, int); extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, int);
extern int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, extern int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
int flags, int flags,


@ -1725,7 +1725,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
server->credits, server->in_flight, server->credits, server->in_flight,
0, cifs_trace_rw_credits_write_response_clear); 0, cifs_trace_rw_credits_write_response_clear);
wdata->credits.value = 0; wdata->credits.value = 0;
cifs_write_subrequest_terminated(wdata, result, true); cifs_write_subrequest_terminated(wdata, result);
release_mid(mid); release_mid(mid);
trace_smb3_rw_credits(credits.rreq_debug_id, credits.rreq_debug_index, 0, trace_smb3_rw_credits(credits.rreq_debug_id, credits.rreq_debug_index, 0,
server->credits, server->in_flight, server->credits, server->in_flight,
@ -1813,7 +1813,7 @@ async_writev_out:
out: out:
if (rc) { if (rc) {
add_credits_and_wake_if(wdata->server, &wdata->credits, 0); add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
cifs_write_subrequest_terminated(wdata, rc, false); cifs_write_subrequest_terminated(wdata, rc);
} }
} }


@ -130,7 +130,7 @@ fail:
else else
trace_netfs_sreq(subreq, netfs_sreq_trace_fail); trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
add_credits_and_wake_if(wdata->server, &wdata->credits, 0); add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
cifs_write_subrequest_terminated(wdata, rc, false); cifs_write_subrequest_terminated(wdata, rc);
goto out; goto out;
} }
@ -219,7 +219,8 @@ static void cifs_issue_read(struct netfs_io_subrequest *subreq)
goto failed; goto failed;
} }
if (subreq->rreq->origin != NETFS_DIO_READ) if (subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
subreq->rreq->origin != NETFS_DIO_READ)
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
trace_netfs_sreq(subreq, netfs_sreq_trace_submit); trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
@ -2423,8 +2424,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
return rc; return rc;
} }
void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result, void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result)
bool was_async)
{ {
struct netfs_io_request *wreq = wdata->rreq; struct netfs_io_request *wreq = wdata->rreq;
struct netfs_inode *ictx = netfs_inode(wreq->inode); struct netfs_inode *ictx = netfs_inode(wreq->inode);
@ -2441,7 +2441,7 @@ void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t
netfs_resize_file(ictx, wrend, true); netfs_resize_file(ictx, wrend, true);
} }
netfs_write_subrequest_terminated(&wdata->subreq, result, was_async); netfs_write_subrequest_terminated(&wdata->subreq, result);
} }
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,


@ -4888,7 +4888,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
0, cifs_trace_rw_credits_write_response_clear); 0, cifs_trace_rw_credits_write_response_clear);
wdata->credits.value = 0; wdata->credits.value = 0;
trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_progress); trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_progress);
cifs_write_subrequest_terminated(wdata, result ?: written, true); cifs_write_subrequest_terminated(wdata, result ?: written);
release_mid(mid); release_mid(mid);
trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0, trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0,
server->credits, server->in_flight, server->credits, server->in_flight,
@ -5061,7 +5061,7 @@ out:
-(int)wdata->credits.value, -(int)wdata->credits.value,
cifs_trace_rw_credits_write_response_clear); cifs_trace_rw_credits_write_response_clear);
add_credits_and_wake_if(wdata->server, &wdata->credits, 0); add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
cifs_write_subrequest_terminated(wdata, rc, true); cifs_write_subrequest_terminated(wdata, rc);
} }
} }


@ -34,7 +34,6 @@ struct folio_queue {
struct folio_queue *prev; /* Previous queue segment or NULL */ struct folio_queue *prev; /* Previous queue segment or NULL */
unsigned long marks; /* 1-bit mark per folio */ unsigned long marks; /* 1-bit mark per folio */
unsigned long marks2; /* Second 1-bit mark per folio */ unsigned long marks2; /* Second 1-bit mark per folio */
unsigned long marks3; /* Third 1-bit mark per folio */
#if PAGEVEC_SIZE > BITS_PER_LONG #if PAGEVEC_SIZE > BITS_PER_LONG
#error marks is not big enough #error marks is not big enough
#endif #endif
@ -58,7 +57,6 @@ static inline void folioq_init(struct folio_queue *folioq, unsigned int rreq_id)
folioq->prev = NULL; folioq->prev = NULL;
folioq->marks = 0; folioq->marks = 0;
folioq->marks2 = 0; folioq->marks2 = 0;
folioq->marks3 = 0;
folioq->rreq_id = rreq_id; folioq->rreq_id = rreq_id;
folioq->debug_id = 0; folioq->debug_id = 0;
} }
@ -178,45 +176,6 @@ static inline void folioq_unmark2(struct folio_queue *folioq, unsigned int slot)
clear_bit(slot, &folioq->marks2); clear_bit(slot, &folioq->marks2);
} }
/**
* folioq_is_marked3: Check third folio mark in a folio queue segment
* @folioq: The segment to query
* @slot: The slot number of the folio to query
*
* Determine if the third mark is set for the folio in the specified slot in a
* folio queue segment.
*/
static inline bool folioq_is_marked3(const struct folio_queue *folioq, unsigned int slot)
{
return test_bit(slot, &folioq->marks3);
}
/**
* folioq_mark3: Set the third mark on a folio in a folio queue segment
* @folioq: The segment to modify
* @slot: The slot number of the folio to modify
*
* Set the third mark for the folio in the specified slot in a folio queue
* segment.
*/
static inline void folioq_mark3(struct folio_queue *folioq, unsigned int slot)
{
set_bit(slot, &folioq->marks3);
}
/**
* folioq_unmark3: Clear the third mark on a folio in a folio queue segment
* @folioq: The segment to modify
* @slot: The slot number of the folio to modify
*
* Clear the third mark for the folio in the specified slot in a folio queue
* segment.
*/
static inline void folioq_unmark3(struct folio_queue *folioq, unsigned int slot)
{
clear_bit(slot, &folioq->marks3);
}
/** /**
* folioq_append: Add a folio to a folio queue segment * folioq_append: Add a folio to a folio queue segment
* @folioq: The segment to add to * @folioq: The segment to add to
@ -318,7 +277,6 @@ static inline void folioq_clear(struct folio_queue *folioq, unsigned int slot)
folioq->vec.folios[slot] = NULL; folioq->vec.folios[slot] = NULL;
folioq_unmark(folioq, slot); folioq_unmark(folioq, slot);
folioq_unmark2(folioq, slot); folioq_unmark2(folioq, slot);
folioq_unmark3(folioq, slot);
} }
#endif /* _LINUX_FOLIO_QUEUE_H */ #endif /* _LINUX_FOLIO_QUEUE_H */
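
With marks3 gone, only the first and second per-folio marks remain, and their meaning is still entirely up to the caller. A minimal sketch of using the first mark to flag folios that still need post-processing (the surrounding function is illustrative):

    #include <linux/folio_queue.h>

    static void example_mark_folio(struct folio_queue *fq, struct folio *folio)
    {
            unsigned int slot;

            slot = folioq_append(fq, folio);
            folioq_mark(fq, slot);          /* needs post-processing */

            if (folioq_is_marked(fq, slot)) {
                    /* ... process folioq_folio(fq, slot) ... */
                    folioq_unmark(fq, slot);
            }
    }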


@ -498,9 +498,6 @@ static inline void fscache_end_operation(struct netfs_cache_resources *cres)
* *
* NETFS_READ_HOLE_IGNORE - Just try to read (may return a short read). * NETFS_READ_HOLE_IGNORE - Just try to read (may return a short read).
* *
* NETFS_READ_HOLE_CLEAR - Seek for data, clearing the part of the buffer
* skipped over, then do as for IGNORE.
*
* NETFS_READ_HOLE_FAIL - Give ENODATA if we encounter a hole. * NETFS_READ_HOLE_FAIL - Give ENODATA if we encounter a hole.
*/ */
static inline static inline
@ -628,7 +625,7 @@ static inline void fscache_write_to_cache(struct fscache_cookie *cookie,
term_func, term_func_priv, term_func, term_func_priv,
using_pgpriv2, caching); using_pgpriv2, caching);
else if (term_func) else if (term_func)
term_func(term_func_priv, -ENOBUFS, false); term_func(term_func_priv, -ENOBUFS);
} }
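
The same two-argument form now applies to netfs_io_terminated_t callbacks such as the term_func passed to fscache_write_to_cache() above. A hedged sketch of such a callback (the function name and its use of priv are illustrative):

    #include <linux/fscache.h>

    static void myfs_copy_to_cache_done(void *priv, ssize_t transferred_or_error)
    {
            /* priv is whatever was passed as term_func_priv. */
            if (transferred_or_error < 0)
                    pr_warn("write to cache failed: %zd\n", transferred_or_error);
    }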


@ -48,11 +48,9 @@ enum netfs_io_source {
NETFS_INVALID_READ, NETFS_INVALID_READ,
NETFS_UPLOAD_TO_SERVER, NETFS_UPLOAD_TO_SERVER,
NETFS_WRITE_TO_CACHE, NETFS_WRITE_TO_CACHE,
NETFS_INVALID_WRITE,
} __mode(byte); } __mode(byte);
typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error, typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error);
bool was_async);
/* /*
* Per-inode context. This wraps the VFS inode. * Per-inode context. This wraps the VFS inode.
@ -71,7 +69,6 @@ struct netfs_inode {
unsigned long flags; unsigned long flags;
#define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */ #define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */
#define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */ #define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */
#define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */
#define NETFS_ICTX_MODIFIED_ATTR 3 /* Indicate change in mtime/ctime */ #define NETFS_ICTX_MODIFIED_ATTR 3 /* Indicate change in mtime/ctime */
#define NETFS_ICTX_SINGLE_NO_UPLOAD 4 /* Monolithic payload, cache but no upload */ #define NETFS_ICTX_SINGLE_NO_UPLOAD 4 /* Monolithic payload, cache but no upload */
}; };
@ -146,8 +143,8 @@ struct netfs_io_stream {
struct netfs_io_subrequest *front; /* Op being collected */ struct netfs_io_subrequest *front; /* Op being collected */
unsigned long long collected_to; /* Position we've collected results to */ unsigned long long collected_to; /* Position we've collected results to */
size_t transferred; /* The amount transferred from this stream */ size_t transferred; /* The amount transferred from this stream */
enum netfs_io_source source; /* Where to read from/write to */
unsigned short error; /* Aggregate error for the stream */ unsigned short error; /* Aggregate error for the stream */
enum netfs_io_source source; /* Where to read from/write to */
unsigned char stream_nr; /* Index of stream in parent table */ unsigned char stream_nr; /* Index of stream in parent table */
bool avail; /* T if stream is available */ bool avail; /* T if stream is available */
bool active; /* T if stream is active */ bool active; /* T if stream is active */
@ -191,7 +188,6 @@ struct netfs_io_subrequest {
unsigned long flags; unsigned long flags;
#define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */ #define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */
#define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */ #define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */
#define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */
#define NETFS_SREQ_MADE_PROGRESS 4 /* Set if we transferred at least some data */ #define NETFS_SREQ_MADE_PROGRESS 4 /* Set if we transferred at least some data */
#define NETFS_SREQ_ONDEMAND 5 /* Set if it's from on-demand read mode */ #define NETFS_SREQ_ONDEMAND 5 /* Set if it's from on-demand read mode */
#define NETFS_SREQ_BOUNDARY 6 /* Set if ends on hard boundary (eg. ceph object) */ #define NETFS_SREQ_BOUNDARY 6 /* Set if ends on hard boundary (eg. ceph object) */
@ -207,6 +203,7 @@ enum netfs_io_origin {
NETFS_READ_GAPS, /* This read is a synchronous read to fill gaps */ NETFS_READ_GAPS, /* This read is a synchronous read to fill gaps */
NETFS_READ_SINGLE, /* This read should be treated as a single object */ NETFS_READ_SINGLE, /* This read should be treated as a single object */
NETFS_READ_FOR_WRITE, /* This read is to prepare a write */ NETFS_READ_FOR_WRITE, /* This read is to prepare a write */
NETFS_UNBUFFERED_READ, /* This is an unbuffered read */
NETFS_DIO_READ, /* This is a direct I/O read */ NETFS_DIO_READ, /* This is a direct I/O read */
NETFS_WRITEBACK, /* This write was triggered by writepages */ NETFS_WRITEBACK, /* This write was triggered by writepages */
NETFS_WRITEBACK_SINGLE, /* This monolithic write was triggered by writepages */ NETFS_WRITEBACK_SINGLE, /* This monolithic write was triggered by writepages */
@ -223,16 +220,18 @@ enum netfs_io_origin {
*/ */
struct netfs_io_request { struct netfs_io_request {
union { union {
struct work_struct work; struct work_struct cleanup_work; /* Deferred cleanup work */
struct rcu_head rcu; struct rcu_head rcu;
}; };
struct work_struct work; /* Result collector work */
struct inode *inode; /* The file being accessed */ struct inode *inode; /* The file being accessed */
struct address_space *mapping; /* The mapping being accessed */ struct address_space *mapping; /* The mapping being accessed */
struct kiocb *iocb; /* AIO completion vector */ struct kiocb *iocb; /* AIO completion vector */
struct netfs_cache_resources cache_resources; struct netfs_cache_resources cache_resources;
struct netfs_io_request *copy_to_cache; /* Request to write just-read data to the cache */ struct netfs_io_request *copy_to_cache; /* Request to write just-read data to the cache */
struct readahead_control *ractl; /* Readahead descriptor */ #ifdef CONFIG_PROC_FS
struct list_head proc_link; /* Link in netfs_iorequests */ struct list_head proc_link; /* Link in netfs_iorequests */
#endif
struct netfs_io_stream io_streams[2]; /* Streams of parallel I/O operations */ struct netfs_io_stream io_streams[2]; /* Streams of parallel I/O operations */
#define NR_IO_STREAMS 2 //wreq->nr_io_streams #define NR_IO_STREAMS 2 //wreq->nr_io_streams
struct netfs_group *group; /* Writeback group being written back */ struct netfs_group *group; /* Writeback group being written back */
@ -243,19 +242,10 @@ struct netfs_io_request {
void *netfs_priv; /* Private data for the netfs */ void *netfs_priv; /* Private data for the netfs */
void *netfs_priv2; /* Private data for the netfs */ void *netfs_priv2; /* Private data for the netfs */
struct bio_vec *direct_bv; /* DIO buffer list (when handling iovec-iter) */ struct bio_vec *direct_bv; /* DIO buffer list (when handling iovec-iter) */
unsigned int direct_bv_count; /* Number of elements in direct_bv[] */
unsigned int debug_id;
unsigned int rsize; /* Maximum read size (0 for none) */
unsigned int wsize; /* Maximum write size (0 for none) */
atomic_t subreq_counter; /* Next subreq->debug_index */
unsigned int nr_group_rel; /* Number of refs to release on ->group */
spinlock_t lock; /* Lock for queuing subreqs */
unsigned long long submitted; /* Amount submitted for I/O so far */ unsigned long long submitted; /* Amount submitted for I/O so far */
unsigned long long len; /* Length of the request */ unsigned long long len; /* Length of the request */
size_t transferred; /* Amount to be indicated as transferred */ size_t transferred; /* Amount to be indicated as transferred */
long error; /* 0 or error that occurred */ long error; /* 0 or error that occurred */
enum netfs_io_origin origin; /* Origin of the request */
bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */
unsigned long long i_size; /* Size of the file */ unsigned long long i_size; /* Size of the file */
unsigned long long start; /* Start position */ unsigned long long start; /* Start position */
atomic64_t issued_to; /* Write issuer folio cursor */ atomic64_t issued_to; /* Write issuer folio cursor */
@ -263,22 +253,29 @@ struct netfs_io_request {
unsigned long long cleaned_to; /* Position we've cleaned folios to */ unsigned long long cleaned_to; /* Position we've cleaned folios to */
unsigned long long abandon_to; /* Position to abandon folios to */ unsigned long long abandon_to; /* Position to abandon folios to */
pgoff_t no_unlock_folio; /* Don't unlock this folio after read */ pgoff_t no_unlock_folio; /* Don't unlock this folio after read */
unsigned int direct_bv_count; /* Number of elements in direct_bv[] */
unsigned int debug_id;
unsigned int rsize; /* Maximum read size (0 for none) */
unsigned int wsize; /* Maximum write size (0 for none) */
atomic_t subreq_counter; /* Next subreq->debug_index */
unsigned int nr_group_rel; /* Number of refs to release on ->group */
spinlock_t lock; /* Lock for queuing subreqs */
unsigned char front_folio_order; /* Order (size) of front folio */ unsigned char front_folio_order; /* Order (size) of front folio */
enum netfs_io_origin origin; /* Origin of the request */
bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */
refcount_t ref; refcount_t ref;
unsigned long flags; unsigned long flags;
#define NETFS_RREQ_OFFLOAD_COLLECTION 0 /* Offload collection to workqueue */ #define NETFS_RREQ_OFFLOAD_COLLECTION 0 /* Offload collection to workqueue */
#define NETFS_RREQ_NO_UNLOCK_FOLIO 2 /* Don't unlock no_unlock_folio on completion */ #define NETFS_RREQ_NO_UNLOCK_FOLIO 2 /* Don't unlock no_unlock_folio on completion */
#define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */
#define NETFS_RREQ_FAILED 4 /* The request failed */ #define NETFS_RREQ_FAILED 4 /* The request failed */
#define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */ #define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes (has ref) */
#define NETFS_RREQ_FOLIO_COPY_TO_CACHE 6 /* Copy current folio to cache from read */ #define NETFS_RREQ_FOLIO_COPY_TO_CACHE 6 /* Copy current folio to cache from read */
#define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */ #define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */
#define NETFS_RREQ_NONBLOCK 9 /* Don't block if possible (O_NONBLOCK) */
#define NETFS_RREQ_BLOCKED 10 /* We blocked */
#define NETFS_RREQ_PAUSE 11 /* Pause subrequest generation */ #define NETFS_RREQ_PAUSE 11 /* Pause subrequest generation */
#define NETFS_RREQ_USE_IO_ITER 12 /* Use ->io_iter rather than ->i_pages */ #define NETFS_RREQ_USE_IO_ITER 12 /* Use ->io_iter rather than ->i_pages */
#define NETFS_RREQ_ALL_QUEUED 13 /* All subreqs are now queued */ #define NETFS_RREQ_ALL_QUEUED 13 /* All subreqs are now queued */
#define NETFS_RREQ_RETRYING 14 /* Set if we're in the retry path */ #define NETFS_RREQ_RETRYING 14 /* Set if we're in the retry path */
#define NETFS_RREQ_SHORT_TRANSFER 15 /* Set if we have a short transfer */
#define NETFS_RREQ_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark #define NETFS_RREQ_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark
* write to cache on read */ * write to cache on read */
const struct netfs_request_ops *netfs_ops; const struct netfs_request_ops *netfs_ops;
@ -321,7 +318,6 @@ struct netfs_request_ops {
*/ */
enum netfs_read_from_hole { enum netfs_read_from_hole {
NETFS_READ_HOLE_IGNORE, NETFS_READ_HOLE_IGNORE,
NETFS_READ_HOLE_CLEAR,
NETFS_READ_HOLE_FAIL, NETFS_READ_HOLE_FAIL,
}; };
@ -439,15 +435,14 @@ void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq);
void netfs_get_subrequest(struct netfs_io_subrequest *subreq, void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
enum netfs_sreq_ref_trace what); enum netfs_sreq_ref_trace what);
void netfs_put_subrequest(struct netfs_io_subrequest *subreq, void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
bool was_async, enum netfs_sreq_ref_trace what); enum netfs_sreq_ref_trace what);
ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len, ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
struct iov_iter *new, struct iov_iter *new,
iov_iter_extraction_t extraction_flags); iov_iter_extraction_t extraction_flags);
size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset, size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
size_t max_size, size_t max_segs); size_t max_size, size_t max_segs);
void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq); void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq);
void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error, void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error);
bool was_async);
void netfs_queue_write_request(struct netfs_io_subrequest *subreq); void netfs_queue_write_request(struct netfs_io_subrequest *subreq);
int netfs_start_io_read(struct inode *inode); int netfs_start_io_read(struct inode *inode);


@ -30,6 +30,7 @@
EM(netfs_write_trace_dio_write, "DIO-WRITE") \ EM(netfs_write_trace_dio_write, "DIO-WRITE") \
EM(netfs_write_trace_unbuffered_write, "UNB-WRITE") \ EM(netfs_write_trace_unbuffered_write, "UNB-WRITE") \
EM(netfs_write_trace_writeback, "WRITEBACK") \ EM(netfs_write_trace_writeback, "WRITEBACK") \
EM(netfs_write_trace_writeback_single, "WB-SINGLE") \
E_(netfs_write_trace_writethrough, "WRITETHRU") E_(netfs_write_trace_writethrough, "WRITETHRU")
#define netfs_rreq_origins \ #define netfs_rreq_origins \
@ -38,6 +39,7 @@
EM(NETFS_READ_GAPS, "RG") \ EM(NETFS_READ_GAPS, "RG") \
EM(NETFS_READ_SINGLE, "R1") \ EM(NETFS_READ_SINGLE, "R1") \
EM(NETFS_READ_FOR_WRITE, "RW") \ EM(NETFS_READ_FOR_WRITE, "RW") \
EM(NETFS_UNBUFFERED_READ, "UR") \
EM(NETFS_DIO_READ, "DR") \ EM(NETFS_DIO_READ, "DR") \
EM(NETFS_WRITEBACK, "WB") \ EM(NETFS_WRITEBACK, "WB") \
EM(NETFS_WRITEBACK_SINGLE, "W1") \ EM(NETFS_WRITEBACK_SINGLE, "W1") \
@ -77,8 +79,7 @@
EM(NETFS_READ_FROM_CACHE, "READ") \ EM(NETFS_READ_FROM_CACHE, "READ") \
EM(NETFS_INVALID_READ, "INVL") \ EM(NETFS_INVALID_READ, "INVL") \
EM(NETFS_UPLOAD_TO_SERVER, "UPLD") \ EM(NETFS_UPLOAD_TO_SERVER, "UPLD") \
EM(NETFS_WRITE_TO_CACHE, "WRIT") \ E_(NETFS_WRITE_TO_CACHE, "WRIT")
E_(NETFS_INVALID_WRITE, "INVL")
#define netfs_sreq_traces \ #define netfs_sreq_traces \
EM(netfs_sreq_trace_add_donations, "+DON ") \ EM(netfs_sreq_trace_add_donations, "+DON ") \
@ -128,17 +129,15 @@
#define netfs_rreq_ref_traces \ #define netfs_rreq_ref_traces \
EM(netfs_rreq_trace_get_for_outstanding,"GET OUTSTND") \ EM(netfs_rreq_trace_get_for_outstanding,"GET OUTSTND") \
EM(netfs_rreq_trace_get_subreq, "GET SUBREQ ") \ EM(netfs_rreq_trace_get_subreq, "GET SUBREQ ") \
EM(netfs_rreq_trace_get_work, "GET WORK ") \
EM(netfs_rreq_trace_put_complete, "PUT COMPLT ") \ EM(netfs_rreq_trace_put_complete, "PUT COMPLT ") \
EM(netfs_rreq_trace_put_discard, "PUT DISCARD") \ EM(netfs_rreq_trace_put_discard, "PUT DISCARD") \
EM(netfs_rreq_trace_put_failed, "PUT FAILED ") \ EM(netfs_rreq_trace_put_failed, "PUT FAILED ") \
EM(netfs_rreq_trace_put_no_submit, "PUT NO-SUBM") \ EM(netfs_rreq_trace_put_no_submit, "PUT NO-SUBM") \
EM(netfs_rreq_trace_put_return, "PUT RETURN ") \ EM(netfs_rreq_trace_put_return, "PUT RETURN ") \
EM(netfs_rreq_trace_put_subreq, "PUT SUBREQ ") \ EM(netfs_rreq_trace_put_subreq, "PUT SUBREQ ") \
EM(netfs_rreq_trace_put_work, "PUT WORK ") \ EM(netfs_rreq_trace_put_work_ip, "PUT WORK IP ") \
EM(netfs_rreq_trace_put_work_complete, "PUT WORK CP") \
EM(netfs_rreq_trace_put_work_nq, "PUT WORK NQ") \
EM(netfs_rreq_trace_see_work, "SEE WORK ") \ EM(netfs_rreq_trace_see_work, "SEE WORK ") \
EM(netfs_rreq_trace_see_work_complete, "SEE WORK CP") \
E_(netfs_rreq_trace_new, "NEW ") E_(netfs_rreq_trace_new, "NEW ")
#define netfs_sreq_ref_traces \ #define netfs_sreq_ref_traces \


@ -1704,7 +1704,7 @@ p9_client_write_subreq(struct netfs_io_subrequest *subreq)
start, len, &subreq->io_iter); start, len, &subreq->io_iter);
} }
if (IS_ERR(req)) { if (IS_ERR(req)) {
netfs_write_subrequest_terminated(subreq, PTR_ERR(req), false); netfs_write_subrequest_terminated(subreq, PTR_ERR(req));
return; return;
} }
@ -1712,7 +1712,7 @@ p9_client_write_subreq(struct netfs_io_subrequest *subreq)
if (err) { if (err) {
trace_9p_protocol_dump(clnt, &req->rc); trace_9p_protocol_dump(clnt, &req->rc);
p9_req_put(clnt, req); p9_req_put(clnt, req);
netfs_write_subrequest_terminated(subreq, err, false); netfs_write_subrequest_terminated(subreq, err);
return; return;
} }
@ -1724,7 +1724,7 @@ p9_client_write_subreq(struct netfs_io_subrequest *subreq)
p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", len); p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", len);
p9_req_put(clnt, req); p9_req_put(clnt, req);
netfs_write_subrequest_terminated(subreq, written, false); netfs_write_subrequest_terminated(subreq, written);
} }
EXPORT_SYMBOL(p9_client_write_subreq); EXPORT_SYMBOL(p9_client_write_subreq);