fs: use min() or umin() instead of min_t()
min_t(unsigned int, a, b) casts an 'unsigned long' to 'unsigned int'.
Use min(a, b) instead as it promotes any 'unsigned int' to 'unsigned long'
and so cannot discard significant bits.
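As a purely illustrative userspace sketch (not part of the patch; the macros
below are simplified stand-ins for the kernel's, which also add compile-time
type checks, and a 64-bit 'unsigned long' is assumed), the truncation looks
like this:
	#include <stdio.h>

	/* simplified stand-ins for the kernel macros, illustration only */
	#define min_t(type, a, b)	((type)(a) < (type)(b) ? (type)(a) : (type)(b))
	#define min(a, b)		((a) < (b) ? (a) : (b))

	int main(void)
	{
		unsigned long count = 0x100000004UL;	/* > UINT_MAX */
		unsigned int limit = 8;

		/* the cast discards the high bits: 0x100000004 becomes 4, so
		 * the "minimum" of 0x100000004 and 8 comes out as 4, not 8 */
		printf("min_t: %u\n", min_t(unsigned int, count, limit));

		/* comparing at the promoted (wider) type keeps every bit: 8 */
		printf("min:   %lu\n", min(count, limit));
		return 0;
	}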
A couple of places need umin() because of loops like:
	nfolios = DIV_ROUND_UP(ret + start, PAGE_SIZE);
	for (i = 0; i < nfolios; i++) {
		struct folio *folio = page_folio(pages[i]);
		...
		unsigned int len = umin(ret, PAGE_SIZE - start);
		...
		ret -= len;
		...
	}
where the compiler doesn't track things well enough to know that
'ret' is never negative.
The alternate loop:
	for (i = 0; ret > 0; i++) {
		struct folio *folio = page_folio(pages[i]);
		...
		unsigned int len = min(ret, PAGE_SIZE - start);
		...
		ret -= len;
		...
	}
would be equivalent and doesn't need 'nfolios'.
Most of the 'unsigned long' actually come from PAGE_SIZE.
Detected by an extra check added to min_t().
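A similarly hedged userspace sketch (again not from the patch) of why the
mixed-signedness case wants umin(): the macro below only mimics the kernel's
umin(), which compares both operands as unsigned values.
	#include <stdio.h>
	#include <sys/types.h>			/* ssize_t */

	/* simplified stand-in for the kernel's umin(), illustration only */
	#define umin(a, b)	((unsigned long)(a) < (unsigned long)(b) ? \
				 (unsigned long)(a) : (unsigned long)(b))

	int main(void)
	{
		ssize_t ret = -1;		/* the case min()'s type check guards against */
		unsigned long room = 4096;	/* e.g. PAGE_SIZE - start */

		/* usual arithmetic conversions turn a negative 'ret' into a huge
		 * unsigned value, so the naive comparison is quietly wrong */
		printf("naive: %d\n", ret < room);	/* prints 0, not 1 */

		/* in the loops above 'ret' only counts down and stays positive,
		 * so comparing as unsigned via umin() is safe even though the
		 * compiler cannot prove it */
		ret = 10000;
		printf("umin:  %lu\n", umin(ret, room));	/* prints 4096 */
		return 0;
	}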
Signed-off-by: David Laight <david.laight.linux@gmail.com>
Link: https://patch.msgid.link/20251119224140.8616-31-david.laight.linux@gmail.com
Signed-off-by: Christian Brauner <brauner@kernel.org>
commit 0f5bb0cfb0 (master)
parent 8f0b4cce44
@@ -2354,7 +2354,7 @@ bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
 	if (!head)
 		return false;
 	blocksize = head->b_size;
-	to = min_t(unsigned, folio_size(folio) - from, count);
+	to = min(folio_size(folio) - from, count);
 	to = from + to;
 	if (from < blocksize && to > folio_size(folio) - blocksize)
 		return false;
@@ -555,7 +555,7 @@ int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
 		return -E2BIG;
 
 	while (len > 0) {
-		unsigned int bytes_to_copy = min_t(unsigned int, len,
+		unsigned int bytes_to_copy = min(len,
 				min_not_zero(offset_in_page(pos), PAGE_SIZE));
 		struct page *page;
 
@@ -4276,8 +4276,7 @@ void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
		 * get the corresponding group metadata to work with.
		 * For this we have goto again loop.
		 */
-		thisgrp_len = min_t(unsigned int, (unsigned int)len,
-			EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
+		thisgrp_len = min(len, EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
		clen = EXT4_NUM_B2C(sbi, thisgrp_len);
 
		if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) {
@@ -1479,7 +1479,7 @@ static void ext4_update_super(struct super_block *sb,
 
	/* Update the global fs size fields */
	sbi->s_groups_count += flex_gd->count;
-	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
+	sbi->s_blockfile_groups = min(sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
 
	/* Update the reserved block counts only once the new group is
@@ -4832,7 +4832,7 @@ static int ext4_check_geometry(struct super_block *sb,
		return -EINVAL;
	}
	sbi->s_groups_count = blocks_count;
-	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
+	sbi->s_blockfile_groups = min(sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
	if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
	    le32_to_cpu(es->s_inodes_count)) {
@@ -1353,7 +1353,7 @@ found:
 
		/* Fill the long name slots. */
		for (i = 0; i < long_bhs; i++) {
-			int copy = min_t(int, sb->s_blocksize - offset, size);
+			int copy = umin(sb->s_blocksize - offset, size);
			memcpy(bhs[i]->b_data + offset, slots, copy);
			mark_buffer_dirty_inode(bhs[i], dir);
			offset = 0;
@@ -1364,7 +1364,7 @@ found:
			err = fat_sync_bhs(bhs, long_bhs);
		if (!err && i < nr_bhs) {
			/* Fill the short name slot. */
-			int copy = min_t(int, sb->s_blocksize - offset, size);
+			int copy = umin(sb->s_blocksize - offset, size);
			memcpy(bhs[i]->b_data + offset, slots, copy);
			mark_buffer_dirty_inode(bhs[i], dir);
			if (IS_DIRSYNC(dir))
@@ -140,8 +140,7 @@ static int fat_ioctl_fitrim(struct inode *inode, unsigned long arg)
	if (copy_from_user(&range, user_range, sizeof(range)))
		return -EFAULT;
 
-	range.minlen = max_t(unsigned int, range.minlen,
-			     bdev_discard_granularity(sb->s_bdev));
+	range.minlen = max(range.minlen, bdev_discard_granularity(sb->s_bdev));
 
	err = fat_trim_fs(inode, &range);
	if (err < 0)
@@ -1813,7 +1813,7 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			goto out_iput;
 
		folio_offset = ((index - folio->index) << PAGE_SHIFT) + offset;
-		nr_bytes = min_t(unsigned, num, folio_size(folio) - folio_offset);
+		nr_bytes = min(num, folio_size(folio) - folio_offset);
		nr_pages = (offset + nr_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
		err = fuse_copy_folio(cs, &folio, folio_offset, nr_bytes, 0);
@@ -1323,10 +1323,8 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
 static inline unsigned int fuse_wr_pages(loff_t pos, size_t len,
				     unsigned int max_pages)
 {
-	return min_t(unsigned int,
-		     ((pos + len - 1) >> PAGE_SHIFT) -
-		     (pos >> PAGE_SHIFT) + 1,
-		     max_pages);
+	return min(((pos + len - 1) >> PAGE_SHIFT) - (pos >> PAGE_SHIFT) + 1,
+		   max_pages);
 }
 
 static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii)
@@ -1607,7 +1605,7 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
			struct folio *folio = page_folio(pages[i]);
			unsigned int offset = start +
				(folio_page_idx(folio, pages[i]) << PAGE_SHIFT);
-			unsigned int len = min_t(unsigned int, ret, PAGE_SIZE - start);
+			unsigned int len = umin(ret, PAGE_SIZE - start);
 
			ap->descs[ap->num_folios].offset = offset;
			ap->descs[ap->num_folios].length = len;
@@ -1467,7 +1467,7 @@ static ssize_t iter_to_pipe(struct iov_iter *from,
 
		n = DIV_ROUND_UP(left + start, PAGE_SIZE);
		for (i = 0; i < n; i++) {
-			int size = min_t(int, left, PAGE_SIZE - start);
+			int size = umin(left, PAGE_SIZE - start);
 
			buf.page = pages[i];
			buf.offset = start;