fs/ntfs3: remove ntfs_bio_pages and use page cache for compressed I/O

Replace the use of ntfs_bio_pages() with the block device's page cache for
reading and writing compressed files. This slightly improves performance when
reading compressed data and simplifies the I/O logic.

When an XPRESS- or LZX-compressed file is opened for writing, it is now
decompressed into a normal file before modification. A new argument (`int copy`)
is added to ni_read_frame() to handle writing back the decompressed, mapped data.

Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
pull/1354/merge
Konstantin Komarov 2025-10-14 20:36:17 +03:00
parent c3856bb499
commit f35590ee26
No known key found for this signature in database
GPG Key ID: A9B0331F832407B6
6 changed files with 116 additions and 194 deletions

View File

@ -1457,7 +1457,6 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
pgoff_t index = vbo[i] >> PAGE_SHIFT; pgoff_t index = vbo[i] >> PAGE_SHIFT;
if (index != folio->index) { if (index != folio->index) {
struct page *page = &folio->page;
u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1); u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
u64 to = min(from + PAGE_SIZE, wof_size); u64 to = min(from + PAGE_SIZE, wof_size);
@ -1467,8 +1466,7 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
if (err) if (err)
goto out1; goto out1;
err = ntfs_bio_pages(sbi, run, &page, 1, from, err = ntfs_read_run(sbi, run, addr, from, to - from);
to - from, REQ_OP_READ);
if (err) { if (err) {
folio->index = -1; folio->index = -1;
goto out1; goto out1;

View File

@ -59,7 +59,7 @@ static int ntfs_ioctl_get_volume_label(struct ntfs_sb_info *sbi, u8 __user *buf)
static int ntfs_ioctl_set_volume_label(struct ntfs_sb_info *sbi, u8 __user *buf) static int ntfs_ioctl_set_volume_label(struct ntfs_sb_info *sbi, u8 __user *buf)
{ {
u8 user[FSLABEL_MAX] = {0}; u8 user[FSLABEL_MAX] = { 0 };
int len; int len;
if (!capable(CAP_SYS_ADMIN)) if (!capable(CAP_SYS_ADMIN))
@ -1039,7 +1039,7 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
if (!frame_uptodate && off) { if (!frame_uptodate && off) {
err = ni_read_frame(ni, frame_vbo, pages, err = ni_read_frame(ni, frame_vbo, pages,
pages_per_frame); pages_per_frame, 0);
if (err) { if (err) {
for (ip = 0; ip < pages_per_frame; ip++) { for (ip = 0; ip < pages_per_frame; ip++) {
folio = page_folio(pages[ip]); folio = page_folio(pages[ip]);
@ -1104,7 +1104,7 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
if (off || (to < i_size && (to & (frame_size - 1)))) { if (off || (to < i_size && (to & (frame_size - 1)))) {
err = ni_read_frame(ni, frame_vbo, pages, err = ni_read_frame(ni, frame_vbo, pages,
pages_per_frame); pages_per_frame, 0);
if (err) { if (err) {
for (ip = 0; ip < pages_per_frame; for (ip = 0; ip < pages_per_frame;
ip++) { ip++) {

View File

@ -2105,7 +2105,7 @@ int ni_readpage_cmpr(struct ntfs_inode *ni, struct folio *folio)
pages[i] = pg; pages[i] = pg;
} }
err = ni_read_frame(ni, frame_vbo, pages, pages_per_frame); err = ni_read_frame(ni, frame_vbo, pages, pages_per_frame, 0);
out1: out1:
for (i = 0; i < pages_per_frame; i++) { for (i = 0; i < pages_per_frame; i++) {
@ -2175,17 +2175,9 @@ int ni_decompress_file(struct ntfs_inode *ni)
*/ */
index = 0; index = 0;
for (vbo = 0; vbo < i_size; vbo += bytes) { for (vbo = 0; vbo < i_size; vbo += bytes) {
u32 nr_pages;
bool new; bool new;
if (vbo + frame_size > i_size) { bytes = vbo + frame_size > i_size ? (i_size - vbo) : frame_size;
bytes = i_size - vbo;
nr_pages = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
} else {
nr_pages = pages_per_frame;
bytes = frame_size;
}
end = bytes_to_cluster(sbi, vbo + bytes); end = bytes_to_cluster(sbi, vbo + bytes);
for (vcn = vbo >> sbi->cluster_bits; vcn < end; vcn += clen) { for (vcn = vbo >> sbi->cluster_bits; vcn < end; vcn += clen) {
@ -2210,15 +2202,7 @@ int ni_decompress_file(struct ntfs_inode *ni)
pages[i] = pg; pages[i] = pg;
} }
err = ni_read_frame(ni, vbo, pages, pages_per_frame); err = ni_read_frame(ni, vbo, pages, pages_per_frame, 1);
if (!err) {
down_read(&ni->file.run_lock);
err = ntfs_bio_pages(sbi, &ni->file.run, pages,
nr_pages, vbo, bytes,
REQ_OP_WRITE);
up_read(&ni->file.run_lock);
}
for (i = 0; i < pages_per_frame; i++) { for (i = 0; i < pages_per_frame; i++) {
unlock_page(pages[i]); unlock_page(pages[i]);
@ -2408,20 +2392,19 @@ out2:
* Pages - Array of locked pages. * Pages - Array of locked pages.
*/ */
int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages, int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
u32 pages_per_frame) u32 pages_per_frame, int copy)
{ {
int err; int err;
struct ntfs_sb_info *sbi = ni->mi.sbi; struct ntfs_sb_info *sbi = ni->mi.sbi;
u8 cluster_bits = sbi->cluster_bits; u8 cluster_bits = sbi->cluster_bits;
char *frame_ondisk = NULL; char *frame_ondisk = NULL;
char *frame_mem = NULL; char *frame_mem = NULL;
struct page **pages_disk = NULL;
struct ATTR_LIST_ENTRY *le = NULL; struct ATTR_LIST_ENTRY *le = NULL;
struct runs_tree *run = &ni->file.run; struct runs_tree *run = &ni->file.run;
u64 valid_size = ni->i_valid; u64 valid_size = ni->i_valid;
u64 vbo_disk; u64 vbo_disk;
size_t unc_size; size_t unc_size;
u32 frame_size, i, npages_disk, ondisk_size; u32 frame_size, i, ondisk_size;
struct page *pg; struct page *pg;
struct ATTRIB *attr; struct ATTRIB *attr;
CLST frame, clst_data; CLST frame, clst_data;
@ -2513,7 +2496,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
err = attr_wof_frame_info(ni, attr, run, frame64, frames, err = attr_wof_frame_info(ni, attr, run, frame64, frames,
frame_bits, &ondisk_size, &vbo_data); frame_bits, &ondisk_size, &vbo_data);
if (err) if (err)
goto out2; goto out1;
if (frame64 == frames) { if (frame64 == frames) {
unc_size = 1 + ((i_size - 1) & (frame_size - 1)); unc_size = 1 + ((i_size - 1) & (frame_size - 1));
@ -2524,7 +2507,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
if (ondisk_size > frame_size) { if (ondisk_size > frame_size) {
err = -EINVAL; err = -EINVAL;
goto out2; goto out1;
} }
if (!attr->non_res) { if (!attr->non_res) {
@ -2545,10 +2528,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
ARRAY_SIZE(WOF_NAME), run, vbo_disk, ARRAY_SIZE(WOF_NAME), run, vbo_disk,
vbo_data + ondisk_size); vbo_data + ondisk_size);
if (err) if (err)
goto out2; goto out1;
npages_disk = (ondisk_size + (vbo_disk & (PAGE_SIZE - 1)) +
PAGE_SIZE - 1) >>
PAGE_SHIFT;
#endif #endif
} else if (is_attr_compressed(attr)) { } else if (is_attr_compressed(attr)) {
/* LZNT compression. */ /* LZNT compression. */
@ -2582,60 +2562,37 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
if (clst_data >= NTFS_LZNT_CLUSTERS) { if (clst_data >= NTFS_LZNT_CLUSTERS) {
/* Frame is not compressed. */ /* Frame is not compressed. */
down_read(&ni->file.run_lock); down_read(&ni->file.run_lock);
err = ntfs_bio_pages(sbi, run, pages, pages_per_frame, err = ntfs_read_run(sbi, run, frame_mem, frame_vbo,
frame_vbo, ondisk_size, ondisk_size);
REQ_OP_READ);
up_read(&ni->file.run_lock); up_read(&ni->file.run_lock);
goto out1; goto out1;
} }
vbo_disk = frame_vbo; vbo_disk = frame_vbo;
npages_disk = (ondisk_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
} else { } else {
__builtin_unreachable(); __builtin_unreachable();
err = -EINVAL; err = -EINVAL;
goto out1; goto out1;
} }
pages_disk = kcalloc(npages_disk, sizeof(*pages_disk), GFP_NOFS); /* Allocate memory to read compressed data to. */
if (!pages_disk) { frame_ondisk = kvmalloc(ondisk_size, GFP_KERNEL);
if (!frame_ondisk) {
err = -ENOMEM; err = -ENOMEM;
goto out2; goto out1;
}
for (i = 0; i < npages_disk; i++) {
pg = alloc_page(GFP_KERNEL);
if (!pg) {
err = -ENOMEM;
goto out3;
}
pages_disk[i] = pg;
lock_page(pg);
} }
/* Read 'ondisk_size' bytes from disk. */ /* Read 'ondisk_size' bytes from disk. */
down_read(&ni->file.run_lock); down_read(&ni->file.run_lock);
err = ntfs_bio_pages(sbi, run, pages_disk, npages_disk, vbo_disk, err = ntfs_read_run(sbi, run, frame_ondisk, vbo_disk, ondisk_size);
ondisk_size, REQ_OP_READ);
up_read(&ni->file.run_lock); up_read(&ni->file.run_lock);
if (err) if (err)
goto out3; goto out2;
/*
* To simplify decompress algorithm do vmap for source and target pages.
*/
frame_ondisk = vmap(pages_disk, npages_disk, VM_MAP, PAGE_KERNEL_RO);
if (!frame_ondisk) {
err = -ENOMEM;
goto out3;
}
/* Decompress: Frame_ondisk -> frame_mem. */
#ifdef CONFIG_NTFS3_LZX_XPRESS #ifdef CONFIG_NTFS3_LZX_XPRESS
if (run != &ni->file.run) { if (run != &ni->file.run) {
/* LZX or XPRESS */ /* LZX or XPRESS */
err = decompress_lzx_xpress( err = decompress_lzx_xpress(sbi, frame_ondisk, ondisk_size,
sbi, frame_ondisk + (vbo_disk & (PAGE_SIZE - 1)), frame_mem, unc_size, frame_size);
ondisk_size, frame_mem, unc_size, frame_size);
} else } else
#endif #endif
{ {
@ -2653,24 +2610,21 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
memset(frame_mem + ok, 0, frame_size - ok); memset(frame_mem + ok, 0, frame_size - ok);
} }
vunmap(frame_ondisk);
out3:
for (i = 0; i < npages_disk; i++) {
pg = pages_disk[i];
if (pg) {
unlock_page(pg);
put_page(pg);
}
}
kfree(pages_disk);
out2: out2:
kvfree(frame_ondisk);
out1:
#ifdef CONFIG_NTFS3_LZX_XPRESS #ifdef CONFIG_NTFS3_LZX_XPRESS
if (run != &ni->file.run) if (run != &ni->file.run)
run_free(run); run_free(run);
if (!err && copy) {
/* We are called from 'ni_decompress_file' */
/* Copy decompressed LZX or XPRESS data into new place. */
down_read(&ni->file.run_lock);
err = ntfs_write_run(sbi, &ni->file.run, frame_mem, frame_vbo,
frame_size);
up_read(&ni->file.run_lock);
}
#endif #endif
out1:
vunmap(frame_mem); vunmap(frame_mem);
out: out:
for (i = 0; i < pages_per_frame; i++) { for (i = 0; i < pages_per_frame; i++) {
@ -2697,13 +2651,10 @@ int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
u64 frame_vbo = folio_pos(folio); u64 frame_vbo = folio_pos(folio);
CLST frame = frame_vbo >> frame_bits; CLST frame = frame_vbo >> frame_bits;
char *frame_ondisk = NULL; char *frame_ondisk = NULL;
struct page **pages_disk = NULL;
struct ATTR_LIST_ENTRY *le = NULL; struct ATTR_LIST_ENTRY *le = NULL;
char *frame_mem; char *frame_mem;
struct ATTRIB *attr; struct ATTRIB *attr;
struct mft_inode *mi; struct mft_inode *mi;
u32 i;
struct page *pg;
size_t compr_size, ondisk_size; size_t compr_size, ondisk_size;
struct lznt *lznt; struct lznt *lznt;
@ -2738,34 +2689,18 @@ int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
goto out; goto out;
} }
pages_disk = kcalloc(pages_per_frame, sizeof(struct page *), GFP_NOFS); /* Allocate memory to write compressed data to. */
if (!pages_disk) { frame_ondisk = kvmalloc(frame_size, GFP_KERNEL);
err = -ENOMEM;
goto out;
}
for (i = 0; i < pages_per_frame; i++) {
pg = alloc_page(GFP_KERNEL);
if (!pg) {
err = -ENOMEM;
goto out1;
}
pages_disk[i] = pg;
lock_page(pg);
}
/* To simplify compress algorithm do vmap for source and target pages. */
frame_ondisk = vmap(pages_disk, pages_per_frame, VM_MAP, PAGE_KERNEL);
if (!frame_ondisk) { if (!frame_ondisk) {
err = -ENOMEM; err = -ENOMEM;
goto out1; goto out;
} }
/* Map in-memory frame for read-only. */ /* Map in-memory frame for read-only. */
frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL_RO); frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL_RO);
if (!frame_mem) { if (!frame_mem) {
err = -ENOMEM; err = -ENOMEM;
goto out2; goto out1;
} }
mutex_lock(&sbi->compress.mtx_lznt); mutex_lock(&sbi->compress.mtx_lznt);
@ -2781,7 +2716,7 @@ int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
if (!lznt) { if (!lznt) {
mutex_unlock(&sbi->compress.mtx_lznt); mutex_unlock(&sbi->compress.mtx_lznt);
err = -ENOMEM; err = -ENOMEM;
goto out3; goto out2;
} }
sbi->compress.lznt = lznt; sbi->compress.lznt = lznt;
@ -2818,25 +2753,16 @@ int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
goto out2; goto out2;
down_read(&ni->file.run_lock); down_read(&ni->file.run_lock);
err = ntfs_bio_pages(sbi, &ni->file.run, err = ntfs_write_run(sbi, &ni->file.run,
ondisk_size < frame_size ? pages_disk : pages, ondisk_size < frame_size ? frame_ondisk :
pages_per_frame, frame_vbo, ondisk_size, frame_mem,
REQ_OP_WRITE); frame_vbo, ondisk_size);
up_read(&ni->file.run_lock); up_read(&ni->file.run_lock);
out3:
vunmap(frame_mem);
out2: out2:
vunmap(frame_ondisk); vunmap(frame_mem);
out1: out1:
for (i = 0; i < pages_per_frame; i++) { kvfree(frame_ondisk);
pg = pages_disk[i];
if (pg) {
unlock_page(pg);
put_page(pg);
}
}
kfree(pages_disk);
out: out:
return err; return err;
} }

View File

@ -1479,99 +1479,86 @@ int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
} }
/* /*
* ntfs_bio_pages - Read/write pages from/to disk. * ntfs_read_write_run - Read/Write disk's page cache.
*/ */
int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run, int ntfs_read_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
struct page **pages, u32 nr_pages, u64 vbo, u32 bytes, void *buf, u64 vbo, size_t bytes, int wr)
enum req_op op)
{ {
int err = 0;
struct bio *new, *bio = NULL;
struct super_block *sb = sbi->sb; struct super_block *sb = sbi->sb;
struct block_device *bdev = sb->s_bdev; struct address_space *mapping = sb->s_bdev->bd_mapping;
struct page *page;
u8 cluster_bits = sbi->cluster_bits; u8 cluster_bits = sbi->cluster_bits;
CLST lcn, clen, vcn, vcn_next; CLST vcn_next, vcn = vbo >> cluster_bits;
u32 add, off, page_idx; CLST lcn, clen;
u64 lbo, len; u64 lbo, len;
size_t run_idx; size_t idx;
struct blk_plug plug; u32 off, op;
struct folio *folio;
char *kaddr;
if (!bytes) if (!bytes)
return 0; return 0;
blk_start_plug(&plug); if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
return -ENOENT;
/* Align vbo and bytes to be 512 bytes aligned. */ if (lcn == SPARSE_LCN)
lbo = (vbo + bytes + 511) & ~511ull; return -EINVAL;
vbo = vbo & ~511ull;
bytes = lbo - vbo;
vcn = vbo >> cluster_bits;
if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
err = -ENOENT;
goto out;
}
off = vbo & sbi->cluster_mask; off = vbo & sbi->cluster_mask;
page_idx = 0; lbo = ((u64)lcn << cluster_bits) + off;
page = pages[0]; len = ((u64)clen << cluster_bits) - off;
for (;;) { for (;;) {
lbo = ((u64)lcn << cluster_bits) + off; /* Read range [lbo, lbo+len). */
len = ((u64)clen << cluster_bits) - off; folio = read_mapping_folio(mapping, lbo >> PAGE_SHIFT, NULL);
new_bio:
new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS); if (IS_ERR(folio))
if (bio) { return PTR_ERR(folio);
bio_chain(bio, new);
submit_bio(bio); off = offset_in_page(lbo);
op = PAGE_SIZE - off;
if (op > len)
op = len;
if (op > bytes)
op = bytes;
kaddr = kmap_local_folio(folio, 0);
if (wr) {
memcpy(kaddr + off, buf, op);
folio_mark_dirty(folio);
} else {
memcpy(buf, kaddr + off, op);
flush_dcache_folio(folio);
} }
bio = new; kunmap_local(kaddr);
bio->bi_iter.bi_sector = lbo >> 9; folio_put(folio);
while (len) { bytes -= op;
off = vbo & (PAGE_SIZE - 1); if (!bytes)
add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len; return 0;
if (bio_add_page(bio, page, add, off) < add) buf += op;
goto new_bio; len -= op;
if (len) {
if (bytes <= add) /* next volume's page. */
goto out; lbo += op;
bytes -= add; continue;
vbo += add;
if (add + off == PAGE_SIZE) {
page_idx += 1;
if (WARN_ON(page_idx >= nr_pages)) {
err = -EINVAL;
goto out;
}
page = pages[page_idx];
}
if (len <= add)
break;
len -= add;
lbo += add;
} }
/* get next range. */
vcn_next = vcn + clen; vcn_next = vcn + clen;
if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) || if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
vcn != vcn_next) { vcn != vcn_next) {
err = -ENOENT; return -ENOENT;
goto out;
} }
off = 0;
}
out:
if (bio) {
if (!err)
err = submit_bio_wait(bio);
bio_put(bio);
}
blk_finish_plug(&plug);
return err; if (lcn == SPARSE_LCN)
return -EINVAL;
lbo = ((u64)lcn << cluster_bits);
len = ((u64)clen << cluster_bits);
}
} }
/* /*

View File

@ -2107,7 +2107,6 @@ const struct address_space_operations ntfs_aops = {
const struct address_space_operations ntfs_aops_cmpr = { const struct address_space_operations ntfs_aops_cmpr = {
.read_folio = ntfs_read_folio, .read_folio = ntfs_read_folio,
.readahead = ntfs_readahead,
.dirty_folio = block_dirty_folio, .dirty_folio = block_dirty_folio,
.direct_IO = ntfs_direct_IO, .direct_IO = ntfs_direct_IO,
}; };

View File

@ -570,7 +570,7 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
int ni_readpage_cmpr(struct ntfs_inode *ni, struct folio *folio); int ni_readpage_cmpr(struct ntfs_inode *ni, struct folio *folio);
int ni_decompress_file(struct ntfs_inode *ni); int ni_decompress_file(struct ntfs_inode *ni);
int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages, int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
u32 pages_per_frame); u32 pages_per_frame, int copy);
int ni_write_frame(struct ntfs_inode *ni, struct page **pages, int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
u32 pages_per_frame); u32 pages_per_frame);
int ni_remove_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni, int ni_remove_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
@ -633,9 +633,21 @@ int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
u32 bytes, struct ntfs_buffers *nb); u32 bytes, struct ntfs_buffers *nb);
int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr, int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
struct ntfs_buffers *nb, int sync); struct ntfs_buffers *nb, int sync);
int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run, int ntfs_read_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
struct page **pages, u32 nr_pages, u64 vbo, u32 bytes, void *buf, u64 vbo, size_t bytes, int wr);
enum req_op op); static inline int ntfs_read_run(struct ntfs_sb_info *sbi,
const struct runs_tree *run, void *buf, u64 vbo,
size_t bytes)
{
return ntfs_read_write_run(sbi, run, buf, vbo, bytes, 0);
}
static inline int ntfs_write_run(struct ntfs_sb_info *sbi,
const struct runs_tree *run, void *buf,
u64 vbo, size_t bytes)
{
return ntfs_read_write_run(sbi, run, buf, vbo, bytes, 1);
}
int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run); int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run);
int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run, int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
u64 vbo, u64 *lbo, u64 *bytes); u64 vbo, u64 *lbo, u64 *bytes);