btrfs: fix typos in comments and strings
Annual typo fixing pass. Strangely codespell found only about 30% of what
is in this patch, the rest was done manually using a text spellchecker with
a custom dictionary of acceptable terms.

Reviewed-by: Neal Gompa <neal@gompa.dev>
Signed-off-by: David Sterba <dsterba@suse.com>

parent 74e8f002b7
commit 17dc82dc1e
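The automated part of a pass like this can be reproduced with codespell; the exact invocation and the name of the ignore-list file below are illustrative assumptions, not part of this commit:

  # Spell-check the btrfs sources, writing fixes in place and skipping
  # terms that are intentional (acronyms, identifiers, btrfs jargon).
  codespell --write-changes --ignore-words=.codespell-ignore fs/btrfs/

Typos that codespell cannot catch (misspellings that form other valid words, or words split across identifiers) still need a manual pass, which is where the remaining ~70% came from.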
@@ -44,7 +44,7 @@ static __always_inline void memcpy_split_src(char *dest, const char *src1,
  * gives us all the type checking.
  *
  * The extent buffer pages stored in the array folios may not form a contiguous
- * phyusical range, but the API functions assume the linear offset to the range
+ * physical range, but the API functions assume the linear offset to the range
  * from 0 to metadata node size.
  */

@@ -1690,7 +1690,7 @@ out:
  * @ctx->bytenr and @ctx->extent_item_pos. The bytenr of the found leaves are
  * added to the ulist at @ctx->refs, and that ulist is allocated by this
  * function. The caller should free the ulist with free_leaf_list() if
- * @ctx->ignore_extent_item_pos is false, otherwise a fimple ulist_free() is
+ * @ctx->ignore_extent_item_pos is false, otherwise a simple ulist_free() is
  * enough.
  *
  * Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated.

@@ -190,7 +190,7 @@ struct btrfs_backref_share_check_ctx {
  * It's very common to have several file extent items that point to the
  * same extent (bytenr) but with different offsets and lengths. This
  * typically happens for COW writes, partial writes into prealloc
- * extents, NOCOW writes after snapshoting a root, hole punching or
+ * extents, NOCOW writes after snapshotting a root, hole punching or
  * reflinking within the same file (less common perhaps).
  * So keep a small cache with the lookup results for the extent pointed
  * by the last few file extent items. This cache is checked, with a

@@ -414,7 +414,7 @@ struct btrfs_backref_cache {
 /*
  * Whether this cache is for relocation
  *
- * Reloction backref cache require more info for reloc root compared
+ * Relocation backref cache require more info for reloc root compared
  * to generic backref cache.
  */
 bool is_reloc;

@@ -1971,7 +1971,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
  * called, which is where we will transfer a reserved extent's
  * size from the "reserved" counter to the "used" counter - this
  * happens when running delayed references. When we relocate the
- * chunk below, relocation first flushes dellaloc, waits for
+ * chunk below, relocation first flushes delalloc, waits for
  * ordered extent completion (which is where we create delayed
  * references for data extents) and commits the current
  * transaction (which runs delayed references), and only after

@@ -2839,7 +2839,7 @@ next:
  * space or none at all (due to no need to COW, extent buffers
  * were already COWed in the current transaction and still
  * unwritten, tree heights lower than the maximum possible
- * height, etc). For data we generally reserve the axact amount
+ * height, etc). For data we generally reserve the exact amount
  * of space we are going to allocate later, the exception is
  * when using compression, as we must reserve space based on the
  * uncompressed data size, because the compression is only done

@@ -63,7 +63,7 @@ enum btrfs_discard_state {
  * CHUNK_ALLOC_FORCE means it must try to allocate one
  *
  * CHUNK_ALLOC_FORCE_FOR_EXTENT like CHUNK_ALLOC_FORCE but called from
- * find_free_extent() that also activaes the zone
+ * find_free_extent() that also activates the zone
  */
 enum btrfs_chunk_alloc_enum {
 CHUNK_ALLOC_NO_FORCE,

@@ -1290,7 +1290,7 @@ int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
 #define ENTROPY_LVL_HIGH (80)
 
 /*
- * For increasead precision in shannon_entropy calculation,
+ * For increased precision in shannon_entropy calculation,
  * let's do pow(n, M) to save more digits after comma:
  *
  * - maximum int bit length is 64
@@ -153,7 +153,7 @@ void btrfs_add_inode_defrag(struct btrfs_inode *inode, u32 extent_thresh)
 }
 
 /*
- * Pick the defragable inode that we want, if it doesn't exist, we will get the
+ * Pick the defraggable inode that we want, if it doesn't exist, we will get the
  * next one.
  */
 static struct inode_defrag *btrfs_pick_defrag_inode(

@@ -895,7 +895,7 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
 }
 
 /*
- * Initialize the structure which represents a modification to a an extent.
+ * Initialize the structure which represents a modification to an extent.
  *
  * @fs_info: Internal to the mounted filesystem mount structure.
  *

@@ -637,7 +637,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
 break;
 case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
 case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
-DEBUG_WARN("unexpected STARTED ot SUSPENDED dev-replace state");
+DEBUG_WARN("unexpected STARTED or SUSPENDED dev-replace state");
 ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED;
 up_write(&dev_replace->rwsem);
 goto leave;

@@ -3245,7 +3245,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
 /*
  * Subpage runtime limitation on v1 cache.
  *
- * V1 space cache still has some hard codeed PAGE_SIZE usage, while
+ * V1 space cache still has some hard coded PAGE_SIZE usage, while
  * we're already defaulting to v2 cache, no need to bother v1 as it's
  * going to be deprecated anyway.
  */

@@ -1237,7 +1237,7 @@ hit_next:
 state = next_search_state(inserted_state, end);
 /*
  * If there's a next state, whether contiguous or not, we don't
- * need to unlock and start search agian. If it's not contiguous
+ * need to unlock and start search again. If it's not contiguous
  * we will end up here and try to allocate a prealloc state and insert.
  */
 if (state)

@@ -325,7 +325,7 @@ search_again:
 
 /*
  * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
- * is_data == BTRFS_REF_TYPE_DATA, data type is requiried,
+ * is_data == BTRFS_REF_TYPE_DATA, data type is required,
  * is_data == BTRFS_REF_TYPE_ANY, either type is OK.
  */
 int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,

@@ -4316,7 +4316,7 @@ static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
 spin_lock(&fs_info->zone_active_bgs_lock);
 list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
 /*
- * No lock is OK here because avail is monotinically
+ * No lock is OK here because avail is monotonically
  * decreasing, and this is just a hint.
  */
 u64 avail = block_group->zone_capacity - block_group->alloc_offset;

@@ -5613,7 +5613,7 @@ static int check_next_block_uptodate(struct btrfs_trans_handle *trans,
  * If we are UPDATE_BACKREF then we will not, we need to update our backrefs.
  *
  * If we are DROP_REFERENCE this will figure out if we need to drop our current
- * reference, skipping it if we dropped it from a previous incompleted drop, or
+ * reference, skipping it if we dropped it from a previous uncompleted drop, or
  * dropping it if we still have a reference to it.
  */
 static int maybe_drop_reference(struct btrfs_trans_handle *trans, struct btrfs_root *root,
@@ -5760,7 +5760,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 
 /*
  * We have to walk down into this node, and if we're currently at the
- * DROP_REFERNCE stage and this block is shared then we need to switch
+ * DROP_REFERENCE stage and this block is shared then we need to switch
  * to the UPDATE_BACKREF stage in order to convert to FULL_BACKREF.
  */
 if (wc->stage == DROP_REFERENCE && wc->refs[level - 1] > 1) {

@@ -418,7 +418,7 @@ again:
 if (delalloc_end + 1 - delalloc_start > max_bytes)
 delalloc_end = delalloc_start + max_bytes - 1;
 
-/* step two, lock all the folioss after the folios that has start */
+/* step two, lock all the folios after the folios that has start */
 ret = lock_delalloc_folios(inode, locked_folio, delalloc_start,
 delalloc_end);
 ASSERT(!ret || ret == -EAGAIN);

@@ -772,7 +772,7 @@ static void alloc_new_bio(struct btrfs_inode *inode,
  *
  * The will either add the page into the existing @bio_ctrl->bbio, or allocate a
  * new one in @bio_ctrl->bbio.
- * The mirror number for this IO should already be initizlied in
+ * The mirror number for this IO should already be initialized in
  * @bio_ctrl->mirror_num.
  */
 static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl,

@@ -2225,7 +2225,7 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
  * @fs_info: The fs_info for this file system.
  * @start: The offset of the range to start waiting on writeback.
  * @end: The end of the range, inclusive. This is meant to be used in
- * conjuction with wait_marked_extents, so this will usually be
+ * conjunction with wait_marked_extents, so this will usually be
  * the_next_eb->start - 1.
  */
 void btrfs_btree_wait_writeback_range(struct btrfs_fs_info *fs_info, u64 start,

@@ -2495,7 +2495,7 @@ retry:
  * In above case, [32K, 96K) is asynchronously submitted
  * for compression, and [124K, 128K) needs to be written back.
  *
- * If we didn't wait wrtiteback for page 64K, [128K, 128K)
+ * If we didn't wait writeback for page 64K, [128K, 128K)
  * won't be submitted as the page still has writeback flag
  * and will be skipped in the next check.
  *

@@ -2979,7 +2979,7 @@ static void cleanup_extent_buffer_folios(struct extent_buffer *eb)
 {
 const int num_folios = num_extent_folios(eb);
 
-/* We canont use num_extent_folios() as loop bound as eb->folios changes. */
+/* We cannot use num_extent_folios() as loop bound as eb->folios changes. */
 for (int i = 0; i < num_folios; i++) {
 ASSERT(eb->folios[i]);
 detach_extent_buffer_folio(eb, eb->folios[i]);

@@ -153,7 +153,7 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
 if (cache_end > offset) {
 if (offset == cache->offset) {
 /*
- * We cached a dealloc range (found in the io tree) for
+ * We cached a delalloc range (found in the io tree) for
  * a hole or prealloc extent and we have now found a
  * file extent item for the same offset. What we have
  * now is more recent and up to date, so discard what

@@ -970,7 +970,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct folio *folio,
  * Return:
  * > 0 If we can nocow, and updates @write_bytes.
  * 0 If we can't do a nocow write.
- * -EAGAIN If we can't do a nocow write because snapshoting of the inode's
+ * -EAGAIN If we can't do a nocow write because snapshotting of the inode's
  * root is in progress or because we are in a non-blocking IO
  * context and need to block (@nowait is true).
  * < 0 If an error happened.
@@ -3345,7 +3345,7 @@ static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end
  * We could also use the extent map tree to find such delalloc that is
  * being flushed, but using the ordered extents tree is more efficient
  * because it's usually much smaller as ordered extents are removed from
- * the tree once they complete. With the extent maps, we mau have them
+ * the tree once they complete. With the extent maps, we may have them
  * in the extent map tree for a very long time, and they were either
  * created by previous writes or loaded by read operations.
  */

@@ -2282,7 +2282,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
  * If this block group has some small extents we don't want to
  * use up all of our free slots in the cache with them, we want
  * to reserve them to larger extents, however if we have plenty
- * of cache left then go ahead an dadd them, no sense in adding
+ * of cache left then go ahead and add them, no sense in adding
  * the overhead of a bitmap if we don't have to.
  */
 if (info->bytes <= fs_info->sectorsize * 8) {

@@ -3829,7 +3829,7 @@ out_unlock:
 
 /*
  * If we break out of trimming a bitmap prematurely, we should reset the
- * trimming bit. In a rather contrieved case, it's possible to race here so
+ * trimming bit. In a rather contrived case, it's possible to race here so
  * reset the state to BTRFS_TRIM_STATE_UNTRIMMED.
  *
  * start = start of bitmap

@@ -58,7 +58,7 @@ size_t __attribute_const__ btrfs_get_num_csums(void)
  * We support the following block sizes for all systems:
  *
  * - 4K
- * This is the most common block size. For PAGE SIZE > 4K cases the subage
+ * This is the most common block size. For PAGE SIZE > 4K cases the subpage
  * mode is used.
  *
  * - PAGE_SIZE

@@ -283,7 +283,7 @@ enum {
 
 #ifdef CONFIG_BTRFS_EXPERIMENTAL
 /*
- * Features under developmen like Extent tree v2 support is enabled
+ * Features under development like Extent tree v2 support is enabled
  * only under CONFIG_BTRFS_EXPERIMENTAL
  */
 #define BTRFS_FEATURE_INCOMPAT_SUPP \

@@ -370,7 +370,7 @@ int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
 }
 
 /*
- * Unock inode i_rwsem.
+ * Unlock inode i_rwsem.
  *
  * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
  * to decide whether the lock acquired is shared or exclusive.

@@ -1990,7 +1990,7 @@ error:
 }
 
 /*
- * when nowcow writeback call back. This checks for snapshots or COW copies
+ * When nocow writeback calls back. This checks for snapshots or COW copies
  * of the extents that exist in the file, and COWs the file as required.
  *
  * If no cow copies or snapshots exist, we write directly to the existing

@@ -2233,7 +2233,7 @@ error:
  * | OE cleanup | Skip | Untouched |
  *
  * nocow_one_range() failed, the range [cur_offset, nocow_end] is
- * alread cleaned up.
+ * already cleaned up.
  */
 oe_cleanup_start = start;
 oe_cleanup_len = cur_offset - start;
@@ -2986,7 +2986,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
  * If we dropped an inline extent here, we know the range where it is
  * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
  * number of bytes only for that range containing the inline extent.
- * The remaining of the range will be processed when clearning the
+ * The remaining of the range will be processed when clearing the
  * EXTENT_DELALLOC_BIT bit through the ordered extent completion.
  */
 if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {

@@ -4905,7 +4905,7 @@ int btrfs_truncate_block(struct btrfs_inode *inode, u64 offset, u64 start, u64 e
 goto out;
 
 /*
- * Skip the truncatioin if the range in the target block is already aligned.
+ * Skip the truncation if the range in the target block is already aligned.
  * The seemingly complex check will also handle the same block case.
  */
 if (in_head_block && !IS_ALIGNED(start, blocksize))

@@ -957,7 +957,7 @@ static noinline int btrfs_mksnapshot(struct dentry *parent,
 
 /*
  * Force new buffered writes to reserve space even when NOCOW is
- * possible. This is to avoid later writeback (running dealloc) to
+ * possible. This is to avoid later writeback (running delalloc) to
  * fallback to COW mode and unexpectedly fail with ENOSPC.
  */
 btrfs_drew_read_lock(&root->snapshot_lock);

@@ -361,7 +361,7 @@ void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
 atomic_inc(&lock->readers);
 
 /*
- * Ensure the pending reader count is perceieved BEFORE this reader
+ * Ensure the pending reader count is perceived BEFORE this reader
  * goes to sleep in case of active writers. This guarantees new writers
  * won't be allowed and that the current reader will be woken up when
  * the last active writer finishes its jobs.

@@ -74,7 +74,7 @@ enum btrfs_lock_nesting {
 BTRFS_NESTING_NEW_ROOT,
 
 /*
- * We are limited to MAX_LOCKDEP_SUBLCLASSES number of subclasses, so
+ * We are limited to MAX_LOCKDEP_SUBCLASSES number of subclasses, so
  * add this in here and add a static_assert to keep us from going over
  * the limit. As of this writing we're limited to 8, and we're
  * definitely using 8, hence this check to keep us from messing up in

@@ -113,7 +113,7 @@ enum {
 /* Which blocks are covered by extent items. */
 scrub_bitmap_nr_has_extent = 0,
 
-/* Which blocks are meteadata. */
+/* Which blocks are metadata. */
 scrub_bitmap_nr_is_metadata,
 
 /*

@@ -1738,7 +1738,7 @@ static int read_symlink(struct btrfs_root *root,
  * An empty symlink inode. Can happen in rare error paths when
  * creating a symlink (transaction committed before the inode
  * eviction handler removed the symlink inode items and a crash
- * happened in between or the subvol was snapshoted in between).
+ * happened in between or the subvol was snapshotted in between).
  * Print an informative message to dmesg/syslog so that the user
  * can delete the symlink.
  */

@@ -2768,7 +2768,7 @@ out:
  * processing an inode that is a directory and it just got renamed, and existing
  * entries in the cache may refer to inodes that have the directory in their
  * full path - in which case we would generate outdated paths (pre-rename)
- * for the inodes that the cache entries point to. Instead of prunning the
+ * for the inodes that the cache entries point to. Instead of pruning the
  * cache when inserting, do it after we finish processing each inode at
  * finish_inode_if_needed().
  */
@@ -7984,7 +7984,7 @@ static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
 }
 
 /*
- * Make sure any existing dellaloc is flushed for any root used by a send
+ * Make sure any existing delalloc is flushed for any root used by a send
  * operation so that we do not miss any data and we do not race with writeback
  * finishing and changing a tree while send is using the tree. This could
  * happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and

@@ -479,7 +479,7 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
 
 /*
  * On the zoned mode, we always allocate one zone as one chunk.
- * Returning non-zone size alingned bytes here will result in
+ * Returning non-zone size aligned bytes here will result in
  * less pressure for the async metadata reclaim process, and it
  * will over-commit too much leading to ENOSPC. Align down to the
  * zone size to avoid that.

@@ -1528,7 +1528,7 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
  * turned into error mode due to a transaction abort when flushing space
  * above, in that case fail with the abort error instead of returning
  * success to the caller if we can steal from the global rsv - this is
- * just to have caller fail immeditelly instead of later when trying to
+ * just to have caller fail immediately instead of later when trying to
  * modify the fs, making it easier to debug -ENOSPC problems.
  */
 if (BTRFS_FS_ERROR(fs_info)) {

@@ -690,7 +690,7 @@ IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
 \
 GET_SUBPAGE_BITMAP(fs_info, folio, name, &bitmap); \
 btrfs_warn(fs_info, \
-"dumpping bitmap start=%llu len=%u folio=%llu " #name "_bitmap=%*pbl", \
+"dumping bitmap start=%llu len=%u folio=%llu " #name "_bitmap=%*pbl", \
 start, len, folio_pos(folio), \
 blocks_per_folio, &bitmap); \
 }

@@ -13,7 +13,7 @@ struct address_space;
 struct folio;
 
 /*
- * Extra info for subpapge bitmap.
+ * Extra info for subpage bitmap.
  *
  * For subpage we pack all uptodate/dirty/writeback/ordered bitmaps into
  * one larger bitmap.

@@ -1283,7 +1283,7 @@ static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
 const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
 
 /*
- * We need to cleanup all defragable inodes if the autodefragment is
+ * We need to cleanup all defraggable inodes if the autodefragment is
  * close or the filesystem is read only.
  */
 if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&

@@ -997,12 +997,12 @@ int btrfs_test_delayed_refs(u32 sectorsize, u32 nodesize)
 
 ret = simple_tests(&trans);
 if (!ret) {
-test_msg("running delayed refs merg tests on metadata refs");
+test_msg("running delayed refs merge tests on metadata refs");
 ret = merge_tests(&trans, BTRFS_REF_METADATA);
 }
 
 if (!ret) {
-test_msg("running delayed refs merg tests on data refs");
+test_msg("running delayed refs merge tests on data refs");
 ret = merge_tests(&trans, BTRFS_REF_DATA);
 }
@@ -1095,7 +1095,7 @@ int btrfs_test_extent_map(void)
 /*
  * Test a chunk with 2 data stripes one of which
  * intersects the physical address of the super block
- * is correctly recognised.
+ * is correctly recognized.
  */
 .raid_type = BTRFS_BLOCK_GROUP_RAID1,
 .physical_start = SZ_64M - SZ_4M,

@@ -103,7 +103,7 @@ static struct kmem_cache *btrfs_trans_handle_cachep;
 * | attached to transid N+1. |
 * | |
 * | To next stage: |
-* | Until all tree blocks are super blocks are |
+* | Until all tree blocks and super blocks are |
 * | written to block devices |
 * V |
 * Transaction N [[TRANS_STATE_COMPLETED]] V

@@ -2423,7 +2423,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
  * them.
  *
  * We needn't worry that this operation will corrupt the snapshots,
- * because all the tree which are snapshoted will be forced to COW
+ * because all the tree which are snapshotted will be forced to COW
  * the nodes and leaves.
  */
 ret = btrfs_run_delayed_items(trans);

@@ -1209,7 +1209,7 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
 /*
  * For legacy root item, the members starting at generation_v2 will be
  * all filled with 0.
- * And since we allow geneartion_v2 as 0, it will still pass the check.
+ * And since we allow generation_v2 as 0, it will still pass the check.
  */
 read_extent_buffer(leaf, &ri, btrfs_item_ptr_offset(leaf, slot),
 btrfs_item_size(leaf, slot));

@@ -1816,7 +1816,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
 
 /*
  * fixup on a directory may create new entries,
- * make sure we always look for the highset possible
+ * make sure we always look for the highest possible
  * offset
  */
 key.offset = (u64)-1;

@@ -3619,7 +3619,7 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
 
 /*
  * The inode was previously logged and then evicted, set logged_trans to
- * the current transacion's ID, to avoid future tree searches as long as
+ * the current transaction's ID, to avoid future tree searches as long as
  * the inode is not evicted again.
  */
 spin_lock(&inode->lock);

@@ -1377,8 +1377,8 @@ struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
 }
 
 /*
- * Make sure the last byte of label is properly NUL termiated. We use
- * '%s' to print the label, if not properly NUL termiated we can access
+ * Make sure the last byte of label is properly NUL terminated. We use
+ * '%s' to print the label, if not properly NUL terminated we can access
  * beyond the label.
  */
 if (super->label[0] && super->label[BTRFS_LABEL_SIZE - 1])

@@ -4463,7 +4463,7 @@ out_overflow:
 }
 
 /*
- * Should be called with balance mutexe held
+ * Should be called with balance mutex held
  */
 int btrfs_balance(struct btrfs_fs_info *fs_info,
 struct btrfs_balance_control *bctl,

@@ -7486,7 +7486,7 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
 /*
  * Lockdep complains about possible circular locking dependency between
  * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
- * used for freeze procection of a fs (struct super_block.s_writers),
+ * used for freeze protection of a fs (struct super_block.s_writers),
  * which we take when starting a transaction, and extent buffers of the
  * chunk tree if we call read_one_dev() while holding a lock on an
  * extent buffer of the chunk tree. Since we are mounting the filesystem
@@ -7919,8 +7919,6 @@ int btrfs_bg_type_to_factor(u64 flags)
 return btrfs_raid_array[index].ncopies;
 }
-
-
 
 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
 u64 chunk_offset, u64 devid,
 u64 physical_offset, u64 physical_len)
@@ -34,7 +34,7 @@ struct btrfs_zoned_device_info;
 #define BTRFS_MAX_DATA_CHUNK_SIZE (10ULL * SZ_1G)
 
 /*
- * Arbitratry maximum size of one discard request to limit potentially long time
+ * Arbitrary maximum size of one discard request to limit potentially long time
  * spent in blkdev_issue_discard().
  */
 #define BTRFS_MAX_DISCARD_CHUNK_SIZE (SZ_1G)

@@ -495,7 +495,7 @@ struct btrfs_discard_stripe {
 };
 
 /*
- * Context for IO subsmission for device stripe.
+ * Context for IO submission for device stripe.
  *
  * - Track the unfinished mirrors for mirror based profiles
  * Mirror based profiles are SINGLE/DUP/RAID1/RAID10.