dm-table: fix checking for request-based stackable devices

dm-flakey: fix corrupt_bio_byte setup checks
 
 dm raid: add support for resync w/o metadata devices
 
 dm, dm-mpath, dm-vdo, dm-raid: small code simplification
 
 dm-verity: remove support for asynchronous hashes
 
 dm-zoned-target: close smatch warning
 
 dm-thin: update the documentation
 
 dm-thin: enable inline-crypto passthrough
 -----BEGIN PGP SIGNATURE-----
 
 iIoEABYIADIWIQRnH8MwLyZDhyYfesYTAyx9YGnhbQUCaJC5oRQcbXBhdG9ja2FA
 cmVkaGF0LmNvbQAKCRATAyx9YGnhbX+RAP911rivQBhdJSiktvnSulBvSANzzNWa
 9pNIuHGCmX3RVwEA53xpdUBkWjvGbtrUwk2eu8SK728H9aOFrpaDImKdVws=
 =gUBT
 -----END PGP SIGNATURE-----

Merge tag 'for-6.17/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mikulas Patocka:

 - fix checking for request-based stackable devices (dm-table)

 - fix corrupt_bio_byte setup checks (dm-flakey)

 - add support for resync w/o metadata devices (dm raid)

 - small code simplification (dm, dm-mpath, dm-vdo, dm-raid)

 - remove support for asynchronous hashes (dm-verity)

 - close smatch warning (dm-zoned-target)

 - update the documentation and enable inline-crypto passthrough
   (dm-thin)

* tag 'for-6.17/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm: set DM_TARGET_PASSES_CRYPTO feature for dm-thin
  dm-thin: update the documentation
  dm-raid: do not include dm-core.h
  vdo: omit need_resched() before cond_resched()
  md: dm-zoned-target: Initialize return variable r to avoid uninitialized use
  dm-verity: remove support for asynchronous hashes
  dm-mpath: don't print the "loaded" message if registering fails
  dm-mpath: make dm_unregister_path_selector return void
  dm: ima: avoid extra calls to strlen()
  dm: Simplify dm_io_complete()
  dm: Remove unnecessary return in dm_zone_endio()
  dm raid: add support for resync w/o metadata devices
  dm-flakey: Fix corrupt_bio_byte setup checks
  dm-table: fix checking for rq stackable devices
Linus Torvalds 2025-08-04 08:58:53 -07:00
commit d632ab86af
20 changed files with 110 additions and 261 deletions

Documentation/admin-guide/device-mapper/thin-provisioning.rst

@@ -80,11 +80,11 @@ less sharing than average you'll need a larger-than-average metadata device.
 As a guide, we suggest you calculate the number of bytes to use in the
 metadata device as 48 * $data_dev_size / $data_block_size but round it up
-to 2MB if the answer is smaller. If you're creating large numbers of
+to 2MiB if the answer is smaller. If you're creating large numbers of
 snapshots which are recording large amounts of change, you may find you
 need to increase this.

-The largest size supported is 16GB: If the device is larger,
+The largest size supported is 16GiB: If the device is larger,
 a warning will be issued and the excess space will not be used.

 Reloading a pool table
@@ -107,13 +107,13 @@ Using an existing pool device
 $data_block_size gives the smallest unit of disk space that can be
 allocated at a time expressed in units of 512-byte sectors.
-$data_block_size must be between 128 (64KB) and 2097152 (1GB) and a
-multiple of 128 (64KB). $data_block_size cannot be changed after the
+$data_block_size must be between 128 (64KiB) and 2097152 (1GiB) and a
+multiple of 128 (64KiB). $data_block_size cannot be changed after the
 thin-pool is created. People primarily interested in thin provisioning
-may want to use a value such as 1024 (512KB). People doing lots of
-snapshotting may want a smaller value such as 128 (64KB). If you are
+may want to use a value such as 1024 (512KiB). People doing lots of
+snapshotting may want a smaller value such as 128 (64KiB). If you are
 not zeroing newly-allocated data, a larger $data_block_size in the
-region of 256000 (128MB) is suggested.
+region of 262144 (128MiB) is suggested.

 $low_water_mark is expressed in blocks of size $data_block_size. If
 free space on the data device drops below this level then a dm event
@@ -291,7 +291,7 @@ i) Constructor
 error_if_no_space:
 	Error IOs, instead of queueing, if no space.

-Data block size must be between 64KB (128 sectors) and 1GB
+Data block size must be between 64KiB (128 sectors) and 1GiB
 (2097152 sectors) inclusive.
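
As a quick sanity check of the formula above: for a hypothetical 1 TiB data device with 512 KiB data blocks, 48 * $data_dev_size / $data_block_size works out to 96 MiB, well above the 2 MiB floor and below the 16 GiB limit. A minimal userspace sketch of the calculation (the sizes are made up; clamping to the limit is a simplification, since the kernel merely ignores the excess):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t data_dev_size   = 1ULL << 40;   /* 1 TiB, in bytes   */
        uint64_t data_block_size = 512ULL << 10; /* 512 KiB, in bytes */
        uint64_t meta = 48 * data_dev_size / data_block_size;

        if (meta < (2ULL << 20))
            meta = 2ULL << 20;   /* round up to 2 MiB */
        if (meta > (16ULL << 30))
            meta = 16ULL << 30;  /* 16 GiB maximum    */

        printf("metadata device: %llu MiB\n",
               (unsigned long long)(meta >> 20)); /* prints 96 */
        return 0;
    }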

drivers/md/dm-flakey.c

@@ -215,16 +215,19 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
 	}

 	if (test_bit(DROP_WRITES, &fc->flags) &&
-	    (fc->corrupt_bio_rw == WRITE || fc->random_write_corrupt)) {
+	    ((fc->corrupt_bio_byte && fc->corrupt_bio_rw == WRITE) ||
+	     fc->random_write_corrupt)) {
 		ti->error = "drop_writes is incompatible with random_write_corrupt or corrupt_bio_byte with the WRITE flag set";
 		return -EINVAL;
 	} else if (test_bit(ERROR_WRITES, &fc->flags) &&
-		   (fc->corrupt_bio_rw == WRITE || fc->random_write_corrupt)) {
+		   ((fc->corrupt_bio_byte && fc->corrupt_bio_rw == WRITE) ||
+		    fc->random_write_corrupt)) {
 		ti->error = "error_writes is incompatible with random_write_corrupt or corrupt_bio_byte with the WRITE flag set";
 		return -EINVAL;
 	} else if (test_bit(ERROR_READS, &fc->flags) &&
-		   (fc->corrupt_bio_rw == READ || fc->random_read_corrupt)) {
+		   ((fc->corrupt_bio_byte && fc->corrupt_bio_rw == READ) ||
+		    fc->random_read_corrupt)) {
 		ti->error = "error_reads is incompatible with random_read_corrupt or corrupt_bio_byte with the READ flag set";
 		return -EINVAL;
 	}
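
The key detail in the fix above is that fc->corrupt_bio_rw can compare equal to WRITE even when no corrupt_bio_byte feature was configured, so the old checks could spuriously reject valid drop_writes/error_writes tables. The corrected predicate, factored out as a sketch (dm-flakey itself keeps these checks inline):

    /* Write-corruption features conflict with drop_writes/error_writes
     * only if byte corruption is actually enabled for WRITEs, i.e.
     * corrupt_bio_byte was set, or random write corruption was requested. */
    static bool conflicts_with_write_dropping(const struct flakey_c *fc)
    {
        return (fc->corrupt_bio_byte && fc->corrupt_bio_rw == WRITE) ||
               fc->random_write_corrupt;
    }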

drivers/md/dm-ima.c

@@ -241,10 +241,11 @@ void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_fl
 		/*
 		 * First retrieve the target metadata.
 		 */
-		scnprintf(target_metadata_buf, DM_IMA_TARGET_METADATA_BUF_LEN,
-			  "target_index=%d,target_begin=%llu,target_len=%llu,",
-			  i, ti->begin, ti->len);
-		target_metadata_buf_len = strlen(target_metadata_buf);
+		target_metadata_buf_len =
+			scnprintf(target_metadata_buf,
+				  DM_IMA_TARGET_METADATA_BUF_LEN,
+				  "target_index=%d,target_begin=%llu,target_len=%llu,",
+				  i, ti->begin, ti->len);

 		/*
 		 * Then retrieve the actual target data.
@@ -448,11 +449,9 @@ void dm_ima_measure_on_device_resume(struct mapped_device *md, bool swap)
 		if (r)
 			goto error;

-		scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
-			  "%sname=%s,uuid=%s;device_resume=no_data;",
-			  DM_IMA_VERSION_STR, dev_name, dev_uuid);
-		l = strlen(device_table_data);
+		l = scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
+			      "%sname=%s,uuid=%s;device_resume=no_data;",
+			      DM_IMA_VERSION_STR, dev_name, dev_uuid);
 	}

 	capacity_len = strlen(capacity_str);
@@ -561,10 +560,9 @@ void dm_ima_measure_on_device_remove(struct mapped_device *md, bool remove_all)
 		if (dm_ima_alloc_and_copy_name_uuid(md, &dev_name, &dev_uuid, noio))
 			goto error;

-		scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
-			  "%sname=%s,uuid=%s;device_remove=no_data;",
-			  DM_IMA_VERSION_STR, dev_name, dev_uuid);
-		l = strlen(device_table_data);
+		l = scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
+			      "%sname=%s,uuid=%s;device_remove=no_data;",
+			      DM_IMA_VERSION_STR, dev_name, dev_uuid);
 	}

 	memcpy(device_table_data + l, remove_all_str, remove_all_len);
@@ -647,10 +645,9 @@ void dm_ima_measure_on_table_clear(struct mapped_device *md, bool new_map)
 		if (dm_ima_alloc_and_copy_name_uuid(md, &dev_name, &dev_uuid, noio))
 			goto error2;

-		scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
-			  "%sname=%s,uuid=%s;table_clear=no_data;",
-			  DM_IMA_VERSION_STR, dev_name, dev_uuid);
-		l = strlen(device_table_data);
+		l = scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
+			      "%sname=%s,uuid=%s;table_clear=no_data;",
+			      DM_IMA_VERSION_STR, dev_name, dev_uuid);
 	}

 	capacity_len = strlen(capacity_str);
@@ -706,7 +703,7 @@ void dm_ima_measure_on_device_rename(struct mapped_device *md)
 	char *old_device_data = NULL, *new_device_data = NULL, *combined_device_data = NULL;
 	char *new_dev_name = NULL, *new_dev_uuid = NULL, *capacity_str = NULL;
 	bool noio = true;
-	int r;
+	int r, len;

 	if (dm_ima_alloc_and_copy_device_data(md, &new_device_data,
 					      md->ima.active_table.num_targets, noio))
@@ -728,12 +725,11 @@ void dm_ima_measure_on_device_rename(struct mapped_device *md)
 	md->ima.active_table.device_metadata = new_device_data;
 	md->ima.active_table.device_metadata_len = strlen(new_device_data);

-	scnprintf(combined_device_data, DM_IMA_DEVICE_BUF_LEN * 2,
-		  "%s%snew_name=%s,new_uuid=%s;%s", DM_IMA_VERSION_STR, old_device_data,
-		  new_dev_name, new_dev_uuid, capacity_str);
+	len = scnprintf(combined_device_data, DM_IMA_DEVICE_BUF_LEN * 2,
+			"%s%snew_name=%s,new_uuid=%s;%s", DM_IMA_VERSION_STR, old_device_data,
+			new_dev_name, new_dev_uuid, capacity_str);

-	dm_ima_measure_data("dm_device_rename", combined_device_data, strlen(combined_device_data),
-			    noio);
+	dm_ima_measure_data("dm_device_rename", combined_device_data, len, noio);

 	goto exit;
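
Every hunk above applies the same idiom: scnprintf() returns the number of characters it actually stored (excluding the terminating NUL and never more than the buffer holds), so a follow-up strlen() over a buffer that was just formatted is redundant work. A self-contained userspace analogue, with my_scnprintf standing in for the kernel helper:

    #include <stdarg.h>
    #include <stdio.h>
    #include <string.h>

    /* Userspace stand-in for the kernel's scnprintf(): like snprintf(),
     * but returns the number of characters actually stored (never >= size),
     * so callers can use the return value instead of a strlen() pass. */
    static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
    {
        va_list args;
        int i;

        if (size == 0)
            return 0;
        va_start(args, fmt);
        i = vsnprintf(buf, size, fmt, args);
        va_end(args);
        if (i < 0)
            return 0;
        return (size_t)i < size ? i : (int)(size - 1);
    }

    int main(void)
    {
        char buf[64];
        int len = my_scnprintf(buf, sizeof(buf),
                               "name=%s,uuid=%s;", "pool", "1234");

        printf("%d == %zu\n", len, strlen(buf)); /* prints "20 == 20" */
        return 0;
    }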

drivers/md/dm-path-selector.c

@@ -117,16 +117,16 @@ int dm_register_path_selector(struct path_selector_type *pst)
 }
 EXPORT_SYMBOL_GPL(dm_register_path_selector);

-int dm_unregister_path_selector(struct path_selector_type *pst)
+void dm_unregister_path_selector(struct path_selector_type *pst)
 {
 	struct ps_internal *psi;

 	down_write(&_ps_lock);

 	psi = __find_path_selector_type(pst->name);
-	if (!psi) {
+	if (WARN_ON(!psi)) {
 		up_write(&_ps_lock);
-		return -EINVAL;
+		return;
 	}

 	list_del(&psi->list);
@@ -134,7 +134,5 @@ int dm_unregister_path_selector(struct path_selector_type *pst)
 	up_write(&_ps_lock);

 	kfree(psi);
-
-	return 0;
 }
 EXPORT_SYMBOL_GPL(dm_unregister_path_selector);

drivers/md/dm-path-selector.h

@@ -96,7 +96,7 @@ struct path_selector_type {
 int dm_register_path_selector(struct path_selector_type *type);

 /* Unregister a path selector */
-int dm_unregister_path_selector(struct path_selector_type *type);
+void dm_unregister_path_selector(struct path_selector_type *type);

 /* Returns a registered path selector type */
 struct path_selector_type *dm_get_path_selector(const char *name);

drivers/md/dm-ps-historical-service-time.c

@@ -541,8 +541,10 @@ static int __init dm_hst_init(void)
 {
 	int r = dm_register_path_selector(&hst_ps);

-	if (r < 0)
+	if (r < 0) {
 		DMERR("register failed %d", r);
+		return r;
+	}

 	DMINFO("version " HST_VERSION " loaded");
@@ -551,10 +553,7 @@ static int __init dm_hst_init(void)

 static void __exit dm_hst_exit(void)
 {
-	int r = dm_unregister_path_selector(&hst_ps);
-
-	if (r < 0)
-		DMERR("unregister failed %d", r);
+	dm_unregister_path_selector(&hst_ps);
 }

 module_init(dm_hst_init);

drivers/md/dm-ps-io-affinity.c

@@ -260,10 +260,7 @@ static int __init dm_ioa_init(void)

 static void __exit dm_ioa_exit(void)
 {
-	int ret = dm_unregister_path_selector(&ioa_ps);
-
-	if (ret < 0)
-		DMERR("unregister failed %d", ret);
+	dm_unregister_path_selector(&ioa_ps);
 }

 module_init(dm_ioa_init);

drivers/md/dm-ps-queue-length.c

@@ -260,8 +260,10 @@ static int __init dm_ql_init(void)
 {
 	int r = dm_register_path_selector(&ql_ps);

-	if (r < 0)
+	if (r < 0) {
 		DMERR("register failed %d", r);
+		return r;
+	}

 	DMINFO("version " QL_VERSION " loaded");
@@ -270,10 +272,7 @@ static int __init dm_ql_init(void)

 static void __exit dm_ql_exit(void)
 {
-	int r = dm_unregister_path_selector(&ql_ps);
-
-	if (r < 0)
-		DMERR("unregister failed %d", r);
+	dm_unregister_path_selector(&ql_ps);
 }

 module_init(dm_ql_init);

drivers/md/dm-ps-round-robin.c

@@ -220,8 +220,10 @@ static int __init dm_rr_init(void)
 {
 	int r = dm_register_path_selector(&rr_ps);

-	if (r < 0)
+	if (r < 0) {
 		DMERR("register failed %d", r);
+		return r;
+	}

 	DMINFO("version " RR_VERSION " loaded");
@@ -230,10 +232,7 @@ static int __init dm_rr_init(void)

 static void __exit dm_rr_exit(void)
 {
-	int r = dm_unregister_path_selector(&rr_ps);
-
-	if (r < 0)
-		DMERR("unregister failed %d", r);
+	dm_unregister_path_selector(&rr_ps);
 }

 module_init(dm_rr_init);

drivers/md/dm-ps-service-time.c

@@ -341,8 +341,10 @@ static int __init dm_st_init(void)
 {
 	int r = dm_register_path_selector(&st_ps);

-	if (r < 0)
+	if (r < 0) {
 		DMERR("register failed %d", r);
+		return r;
+	}

 	DMINFO("version " ST_VERSION " loaded");
@@ -351,10 +353,7 @@ static int __init dm_st_init(void)

 static void __exit dm_st_exit(void)
 {
-	int r = dm_unregister_path_selector(&st_ps);
-
-	if (r < 0)
-		DMERR("unregister failed %d", r);
+	dm_unregister_path_selector(&st_ps);
 }

 module_init(dm_st_init);

drivers/md/dm-raid.c

@@ -14,7 +14,6 @@
 #include "raid5.h"
 #include "raid10.h"
 #include "md-bitmap.h"
-#include "dm-core.h"

 #include <linux/device-mapper.h>
@@ -2532,6 +2531,10 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 	struct md_rdev *rdev, *freshest;
 	struct mddev *mddev = &rs->md;

+	/* Respect resynchronization requested with "sync" argument. */
+	if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
+		set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
+
 	freshest = NULL;
 	rdev_for_each(rdev, mddev) {
 		if (test_bit(Journal, &rdev->flags))
@@ -3305,7 +3308,7 @@ size_check:
 	/* Disable/enable discard support on raid set. */
 	configure_discard_support(rs);

-	rs->md.dm_gendisk = ti->table->md->disk;
+	rs->md.dm_gendisk = dm_disk(dm_table_get_md(ti->table));

 	mddev_unlock(&rs->md);
 	return 0;

drivers/md/dm-table.c

@@ -899,17 +899,17 @@ static bool dm_table_supports_dax(struct dm_table *t,
 	return true;
 }

-static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
-				  sector_t start, sector_t len, void *data)
+static int device_is_not_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
+				      sector_t start, sector_t len, void *data)
 {
 	struct block_device *bdev = dev->bdev;
 	struct request_queue *q = bdev_get_queue(bdev);

 	/* request-based cannot stack on partitions! */
 	if (bdev_is_partition(bdev))
-		return false;
+		return true;

-	return queue_is_mq(q);
+	return !queue_is_mq(q);
 }

 static int dm_table_determine_type(struct dm_table *t)
@@ -1005,7 +1005,7 @@ verify_rq_based:
 	/* Non-request-stackable devices can't be used for request-based dm */
 	if (!ti->type->iterate_devices ||
-	    !ti->type->iterate_devices(ti, device_is_rq_stackable, NULL)) {
+	    ti->type->iterate_devices(ti, device_is_not_rq_stackable, NULL)) {
 		DMERR("table load rejected: including non-request-stackable devices");
 		return -EINVAL;
 	}
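
The rename is not cosmetic. iterate_devices() reports whether the callback returned true for any underlying device, so "every device is request-stackable" cannot be asked directly; it has to be phrased as "no device is not request-stackable", which is the inversion this patch makes. A simplified userspace model of that contract:

    #include <stdbool.h>
    #include <stddef.h>

    /* Simplified model of the iterate_devices() contract: it reports
     * whether the callback matched ANY underlying device. */
    static bool iterate_devices(const bool *stackable, size_t n,
                                bool (*fn)(bool))
    {
        for (size_t i = 0; i < n; i++)
            if (fn(stackable[i]))
                return true;
        return false;
    }

    static bool is_not_stackable(bool s) { return !s; }

    /* "All devices are stackable" has to be written as "no device
     * matches is_not_stackable" -- the inversion the patch performs. */
    static bool all_stackable(const bool *stackable, size_t n)
    {
        return !iterate_devices(stackable, n, is_not_stackable);
    }

    int main(void)
    {
        bool devs[] = { true, true, false };   /* one non-stackable device */
        return all_stackable(devs, 3) ? 0 : 1; /* exits 1: reject table */
    }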

drivers/md/dm-thin.c

@@ -4111,8 +4111,8 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
 static struct target_type pool_target = {
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
-		    DM_TARGET_IMMUTABLE,
-	.version = {1, 23, 0},
+		    DM_TARGET_IMMUTABLE | DM_TARGET_PASSES_CRYPTO,
+	.version = {1, 24, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
@@ -4497,7 +4497,8 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 static struct target_type thin_target = {
 	.name = "thin",
-	.version = {1, 23, 0},
+	.features = DM_TARGET_PASSES_CRYPTO,
+	.version = {1, 24, 0},
 	.module = THIS_MODULE,
 	.ctr = thin_ctr,
 	.dtr = thin_dtr,

drivers/md/dm-vdo/funnel-workqueue.c

@@ -252,8 +252,7 @@ static void service_work_queue(struct simple_work_queue *queue)
 		 * This speeds up some performance tests; that "other work" might include other VDO
 		 * threads.
 		 */
-		if (need_resched())
-			cond_resched();
+		cond_resched();
 	}

 	run_finish_hook(queue);
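
For context on why the guard could be dropped: cond_resched() already performs the pending-reschedule check itself and is a cheap no-op otherwise. A rough sketch of that shape (hypothetical my_* helpers; not the actual kernel definition):

    #include <stdbool.h>

    /* Hypothetical stand-ins; in the kernel the real pair is roughly
     * should_resched()/__schedule(). They only model the control flow. */
    static bool my_should_resched(void) { return false; }
    static void my_schedule(void) { }

    /* Approximate shape of cond_resched(): the "is a reschedule pending?"
     * test is built in, so callers never need to guard the call. */
    static void my_cond_resched(void)
    {
        if (my_should_resched())
            my_schedule();
    }

    int main(void)
    {
        my_cond_resched(); /* cheap no-op when nothing is pending */
        return 0;
    }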

drivers/md/dm-verity-fec.c

@@ -191,7 +191,7 @@ static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
 			  u8 *want_digest, u8 *data)
 {
 	if (unlikely(verity_hash(v, io, data, 1 << v->data_dev_block_bits,
-				 verity_io_real_digest(v, io), true)))
+				 verity_io_real_digest(v, io))))
 		return 0;

 	return memcmp(verity_io_real_digest(v, io), want_digest,
@@ -392,7 +392,7 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
 	/* Always re-validate the corrected block against the expected hash */
 	r = verity_hash(v, io, fio->output, 1 << v->data_dev_block_bits,
-			verity_io_real_digest(v, io), true);
+			verity_io_real_digest(v, io));
 	if (unlikely(r < 0))
 		return r;

drivers/md/dm-verity-target.c

@@ -19,7 +19,6 @@
 #include "dm-audit.h"
 #include <linux/module.h>
 #include <linux/reboot.h>
-#include <linux/scatterlist.h>
 #include <linux/string.h>
 #include <linux/jump_label.h>
 #include <linux/security.h>
@@ -61,9 +60,6 @@ module_param_array_named(use_bh_bytes, dm_verity_use_bh_bytes, uint, NULL, 0644)
 static DEFINE_STATIC_KEY_FALSE(use_bh_wq_enabled);

-/* Is at least one dm-verity instance using ahash_tfm instead of shash_tfm? */
-static DEFINE_STATIC_KEY_FALSE(ahash_enabled);
-
 struct dm_verity_prefetch_work {
 	struct work_struct work;
 	struct dm_verity *v;
@@ -118,100 +114,21 @@ static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
 	return block >> (level * v->hash_per_block_bits);
 }

-static int verity_ahash_update(struct dm_verity *v, struct ahash_request *req,
-			       const u8 *data, size_t len,
-			       struct crypto_wait *wait)
-{
-	struct scatterlist sg;
-
-	if (likely(!is_vmalloc_addr(data))) {
-		sg_init_one(&sg, data, len);
-		ahash_request_set_crypt(req, &sg, NULL, len);
-		return crypto_wait_req(crypto_ahash_update(req), wait);
-	}
-
-	do {
-		int r;
-		size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));
-
-		flush_kernel_vmap_range((void *)data, this_step);
-		sg_init_table(&sg, 1);
-		sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
-		ahash_request_set_crypt(req, &sg, NULL, this_step);
-		r = crypto_wait_req(crypto_ahash_update(req), wait);
-		if (unlikely(r))
-			return r;
-		data += this_step;
-		len -= this_step;
-	} while (len);
-
-	return 0;
-}
-
-/*
- * Wrapper for crypto_ahash_init, which handles verity salting.
- */
-static int verity_ahash_init(struct dm_verity *v, struct ahash_request *req,
-			     struct crypto_wait *wait, bool may_sleep)
-{
-	int r;
-
-	ahash_request_set_tfm(req, v->ahash_tfm);
-	ahash_request_set_callback(req,
-		may_sleep ? CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG : 0,
-		crypto_req_done, (void *)wait);
-	crypto_init_wait(wait);
-
-	r = crypto_wait_req(crypto_ahash_init(req), wait);
-
-	if (unlikely(r < 0)) {
-		if (r != -ENOMEM)
-			DMERR("crypto_ahash_init failed: %d", r);
-		return r;
-	}
-
-	if (likely(v->salt_size && (v->version >= 1)))
-		r = verity_ahash_update(v, req, v->salt, v->salt_size, wait);
-
-	return r;
-}
-
-static int verity_ahash_final(struct dm_verity *v, struct ahash_request *req,
-			      u8 *digest, struct crypto_wait *wait)
-{
-	int r;
-
-	if (unlikely(v->salt_size && (!v->version))) {
-		r = verity_ahash_update(v, req, v->salt, v->salt_size, wait);
-
-		if (r < 0) {
-			DMERR("%s failed updating salt: %d", __func__, r);
-			goto out;
-		}
-	}
-
-	ahash_request_set_crypt(req, NULL, digest, 0);
-	r = crypto_wait_req(crypto_ahash_final(req), wait);
-out:
-	return r;
-}
-
 int verity_hash(struct dm_verity *v, struct dm_verity_io *io,
-		const u8 *data, size_t len, u8 *digest, bool may_sleep)
+		const u8 *data, size_t len, u8 *digest)
 {
+	struct shash_desc *desc = &io->hash_desc;
 	int r;

-	if (static_branch_unlikely(&ahash_enabled) && !v->shash_tfm) {
-		struct ahash_request *req = verity_io_hash_req(v, io);
-		struct crypto_wait wait;
-
-		r = verity_ahash_init(v, req, &wait, may_sleep) ?:
-		    verity_ahash_update(v, req, data, len, &wait) ?:
-		    verity_ahash_final(v, req, digest, &wait);
+	desc->tfm = v->shash_tfm;
+	if (unlikely(v->initial_hashstate == NULL)) {
+		/* Version 0: salt at end */
+		r = crypto_shash_init(desc) ?:
+		    crypto_shash_update(desc, data, len) ?:
+		    crypto_shash_update(desc, v->salt, v->salt_size) ?:
+		    crypto_shash_final(desc, digest);
 	} else {
-		struct shash_desc *desc = verity_io_hash_req(v, io);
-
-		desc->tfm = v->shash_tfm;
+		/* Version 1: salt at beginning */
 		r = crypto_shash_import(desc, v->initial_hashstate) ?:
 		    crypto_shash_finup(desc, data, len, digest);
 	}
@@ -362,7 +279,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
 	}

 	r = verity_hash(v, io, data, 1 << v->hash_dev_block_bits,
-			verity_io_real_digest(v, io), !io->in_bh);
+			verity_io_real_digest(v, io));
 	if (unlikely(r < 0))
 		goto release_ret_r;
@@ -465,7 +382,7 @@ static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
 		goto free_ret;

 	r = verity_hash(v, io, buffer, 1 << v->data_dev_block_bits,
-			verity_io_real_digest(v, io), true);
+			verity_io_real_digest(v, io));
 	if (unlikely(r))
 		goto free_ret;
@@ -581,7 +498,7 @@ static int verity_verify_io(struct dm_verity_io *io)
 		}

 		r = verity_hash(v, io, data, block_size,
-				verity_io_real_digest(v, io), !io->in_bh);
+				verity_io_real_digest(v, io));
 		if (unlikely(r < 0)) {
 			kunmap_local(data);
 			return r;
@@ -1092,12 +1009,7 @@ static void verity_dtr(struct dm_target *ti)
 	kfree(v->zero_digest);
 	verity_free_sig(v);

-	if (v->ahash_tfm) {
-		static_branch_dec(&ahash_enabled);
-		crypto_free_ahash(v->ahash_tfm);
-	} else {
-		crypto_free_shash(v->shash_tfm);
-	}
+	crypto_free_shash(v->shash_tfm);

 	kfree(v->alg_name);
@@ -1157,7 +1069,8 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
 	if (!v->zero_digest)
 		return r;

-	io = kmalloc(sizeof(*io) + v->hash_reqsize, GFP_KERNEL);
+	io = kmalloc(sizeof(*io) + crypto_shash_descsize(v->shash_tfm),
+		     GFP_KERNEL);

 	if (!io)
 		return r;	/* verity_dtr will free zero_digest */
@@ -1168,7 +1081,7 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
 		goto out;

 	r = verity_hash(v, io, zero_data, 1 << v->data_dev_block_bits,
-			v->zero_digest, true);
+			v->zero_digest);

 out:
 	kfree(io);
@@ -1324,9 +1237,7 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
 static int verity_setup_hash_alg(struct dm_verity *v, const char *alg_name)
 {
 	struct dm_target *ti = v->ti;
-	struct crypto_ahash *ahash;
-	struct crypto_shash *shash = NULL;
-	const char *driver_name;
+	struct crypto_shash *shash;

 	v->alg_name = kstrdup(alg_name, GFP_KERNEL);
 	if (!v->alg_name) {
@@ -1334,50 +1245,14 @@ static int verity_setup_hash_alg(struct dm_verity *v, const char *alg_name)
 		return -ENOMEM;
 	}

-	/*
-	 * Allocate the hash transformation object that this dm-verity instance
-	 * will use. The vast majority of dm-verity users use CPU-based
-	 * hashing, so when possible use the shash API to minimize the crypto
-	 * API overhead. If the ahash API resolves to a different driver
-	 * (likely an off-CPU hardware offload), use ahash instead. Also use
-	 * ahash if the obsolete dm-verity format with the appended salt is
-	 * being used, so that quirk only needs to be handled in one place.
-	 */
-	ahash = crypto_alloc_ahash(alg_name, 0,
-				   v->use_bh_wq ? CRYPTO_ALG_ASYNC : 0);
-	if (IS_ERR(ahash)) {
+	shash = crypto_alloc_shash(alg_name, 0, 0);
+	if (IS_ERR(shash)) {
 		ti->error = "Cannot initialize hash function";
-		return PTR_ERR(ahash);
-	}
-	driver_name = crypto_ahash_driver_name(ahash);
-	if (v->version >= 1 /* salt prepended, not appended? */) {
-		shash = crypto_alloc_shash(alg_name, 0, 0);
-		if (!IS_ERR(shash) &&
-		    strcmp(crypto_shash_driver_name(shash), driver_name) != 0) {
-			/*
-			 * ahash gave a different driver than shash, so probably
-			 * this is a case of real hardware offload. Use ahash.
-			 */
-			crypto_free_shash(shash);
-			shash = NULL;
-		}
-	}
-	if (!IS_ERR_OR_NULL(shash)) {
-		crypto_free_ahash(ahash);
-		ahash = NULL;
-		v->shash_tfm = shash;
-		v->digest_size = crypto_shash_digestsize(shash);
-		v->hash_reqsize = sizeof(struct shash_desc) +
-				  crypto_shash_descsize(shash);
-		DMINFO("%s using shash \"%s\"", alg_name, driver_name);
-	} else {
-		v->ahash_tfm = ahash;
-		static_branch_inc(&ahash_enabled);
-		v->digest_size = crypto_ahash_digestsize(ahash);
-		v->hash_reqsize = sizeof(struct ahash_request) +
-				  crypto_ahash_reqsize(ahash);
-		DMINFO("%s using ahash \"%s\"", alg_name, driver_name);
+		return PTR_ERR(shash);
 	}
+	v->shash_tfm = shash;
+	v->digest_size = crypto_shash_digestsize(shash);
+	DMINFO("%s using \"%s\"", alg_name, crypto_shash_driver_name(shash));
+
 	if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
 		ti->error = "Digest size too big";
 		return -EINVAL;
@@ -1402,7 +1277,7 @@ static int verity_setup_salt_and_hashstate(struct dm_verity *v, const char *arg)
 			return -EINVAL;
 		}
 	}
-	if (v->shash_tfm) {
+	if (v->version) { /* Version 1: salt at beginning */
 		SHASH_DESC_ON_STACK(desc, v->shash_tfm);
 		int r;
@@ -1681,7 +1556,8 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad;
 	}

-	ti->per_io_data_size = sizeof(struct dm_verity_io) + v->hash_reqsize;
+	ti->per_io_data_size = sizeof(struct dm_verity_io) +
+		crypto_shash_descsize(v->shash_tfm);

 	r = verity_fec_ctr(v);
 	if (r)
@@ -1788,10 +1664,7 @@ static int verity_preresume(struct dm_target *ti)
 	bdev = dm_disk(dm_table_get_md(ti->table))->part0;
 	root_digest.digest = v->root_digest;
 	root_digest.digest_len = v->digest_size;
-	if (static_branch_unlikely(&ahash_enabled) && !v->shash_tfm)
-		root_digest.alg = crypto_ahash_alg_name(v->ahash_tfm);
-	else
-		root_digest.alg = crypto_shash_alg_name(v->shash_tfm);
+	root_digest.alg = crypto_shash_alg_name(v->shash_tfm);

 	r = security_bdev_setintegrity(bdev, LSM_INT_DMVERITY_ROOTHASH, &root_digest,
 				       sizeof(root_digest));
@@ -1817,7 +1690,7 @@ static struct target_type verity_target = {
 	.name = "verity",
 	/* Note: the LSMs depend on the singleton and immutable features */
 	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
-	.version = {1, 11, 0},
+	.version = {1, 12, 0},
 	.module = THIS_MODULE,
 	.ctr = verity_ctr,
 	.dtr = verity_dtr,
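
One optimization worth noting in the shash-only verity_hash() above: for the version-1 format, the salted hash state is computed once at construction time (v->initial_hashstate) and merely imported per block via crypto_shash_import()/crypto_shash_finup(), instead of re-hashing the salt for every block. A userspace analogue of the same trick using OpenSSL's EVP API (illustrative only; the salt and block are made up):

    #include <stdio.h>
    #include <openssl/evp.h>

    int main(void)
    {
        static const unsigned char salt[] = "example-salt";
        static const unsigned char block[4096]; /* pretend data block */
        unsigned char digest[EVP_MAX_MD_SIZE];
        unsigned int dlen = 0;

        /* Hash the salt once; this plays the role of initial_hashstate. */
        EVP_MD_CTX *salted = EVP_MD_CTX_new();
        EVP_DigestInit_ex(salted, EVP_sha256(), NULL);
        EVP_DigestUpdate(salted, salt, sizeof(salt) - 1);

        for (int i = 0; i < 3; i++) { /* per-block verification work */
            EVP_MD_CTX *ctx = EVP_MD_CTX_new();

            EVP_MD_CTX_copy_ex(ctx, salted); /* ~ crypto_shash_import() */
            EVP_DigestUpdate(ctx, block, sizeof(block));
            EVP_DigestFinal_ex(ctx, digest, &dlen); /* ~ crypto_shash_finup() */
            EVP_MD_CTX_free(ctx);
        }
        EVP_MD_CTX_free(salted);
        printf("digest length: %u bytes\n", dlen); /* 32 for SHA-256 */
        return 0;
    }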

drivers/md/dm-verity.h

@@ -39,11 +39,10 @@ struct dm_verity {
 	struct dm_target *ti;
 	struct dm_bufio_client *bufio;
 	char *alg_name;
-	struct crypto_ahash *ahash_tfm; /* either this or shash_tfm is set */
-	struct crypto_shash *shash_tfm; /* either this or ahash_tfm is set */
+	struct crypto_shash *shash_tfm;
 	u8 *root_digest;	/* digest of the root block */
 	u8 *salt;		/* salt: its size is salt_size */
-	u8 *initial_hashstate;	/* salted initial state, if shash_tfm is set */
+	u8 *initial_hashstate;	/* salted initial state, if version >= 1 */
 	u8 *zero_digest;	/* digest for a zero block */
 #ifdef CONFIG_SECURITY
 	u8 *root_digest_sig;	/* signature of the root digest */
@@ -61,7 +60,6 @@ struct dm_verity {
 	bool hash_failed:1;	/* set if hash of any block failed */
 	bool use_bh_wq:1;	/* try to verify in BH wq before normal work-queue */
 	unsigned int digest_size;	/* digest size for the current hash algorithm */
-	unsigned int hash_reqsize; /* the size of temporary space for crypto */
 	enum verity_mode mode;	/* mode for handling verification errors */
 	enum verity_mode error_mode;/* mode for handling I/O errors */
 	unsigned int corrupted_errs;/* Number of errors for corrupted blocks */
@@ -100,19 +98,13 @@ struct dm_verity_io {
 	u8 want_digest[HASH_MAX_DIGESTSIZE];

 	/*
-	 * This struct is followed by a variable-sized hash request of size
-	 * v->hash_reqsize, either a struct ahash_request or a struct shash_desc
-	 * (depending on whether ahash_tfm or shash_tfm is being used). To
-	 * access it, use verity_io_hash_req().
+	 * Temporary space for hashing. This is variable-length and must be at
+	 * the end of the struct. struct shash_desc is just the fixed part;
+	 * it's followed by a context of size crypto_shash_descsize(shash_tfm).
 	 */
+	struct shash_desc hash_desc;
 };

-static inline void *verity_io_hash_req(struct dm_verity *v,
-				       struct dm_verity_io *io)
-{
-	return io + 1;
-}
-
 static inline u8 *verity_io_real_digest(struct dm_verity *v,
 					struct dm_verity_io *io)
 {
@@ -126,7 +118,7 @@ static inline u8 *verity_io_want_digest(struct dm_verity *v,
 }

 extern int verity_hash(struct dm_verity *v, struct dm_verity_io *io,
-		       const u8 *data, size_t len, u8 *digest, bool may_sleep);
+		       const u8 *data, size_t len, u8 *digest);

 extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
 				 sector_t block, u8 *digest, bool *is_zero);

drivers/md/dm-zone.c

@@ -467,8 +467,6 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone)
 			bdev_offset_from_zone_start(disk->part0,
 						    clone->bi_iter.bi_sector);
 	}
-
-	return;
 }

 static int dm_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
static int dm_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,

drivers/md/dm-zoned-target.c

@@ -1062,7 +1062,7 @@ static int dmz_iterate_devices(struct dm_target *ti,
 	struct dmz_target *dmz = ti->private;
 	unsigned int zone_nr_sectors = dmz_zone_nr_sectors(dmz->metadata);
 	sector_t capacity;
-	int i, r;
+	int i, r = 0;

 	for (i = 0; i < dmz->nr_ddevs; i++) {
 		capacity = dmz->dev[i].capacity & ~(zone_nr_sectors - 1);
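
The initialization matters precisely when the loop body never executes: with dmz->nr_ddevs == 0, r would otherwise be returned without ever being assigned, which is what smatch flagged. A minimal reproduction of the pattern:

    #include <stdio.h>

    static int check_one(int i)
    {
        return i; /* stand-in for the per-device callback */
    }

    /* Without "= 0", a zero-iteration loop returns an indeterminate
     * value; this is the pattern the dm-zoned fix addresses. */
    static int check_all(int n)
    {
        int i, r = 0;

        for (i = 0; i < n; i++)
            r = check_one(i);
        return r;
    }

    int main(void)
    {
        printf("%d\n", check_all(0)); /* well-defined 0 thanks to the init */
        return 0;
    }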

drivers/md/dm.c

@@ -1024,10 +1024,8 @@ static void dm_wq_requeue_work(struct work_struct *work)
  *
  * 2) io->orig_bio points to new cloned bio which matches the requeued dm_io.
  */
-static void dm_io_complete(struct dm_io *io)
+static inline void dm_io_complete(struct dm_io *io)
 {
-	bool first_requeue;
-
 	/*
 	 * Only dm_io that has been split needs two stage requeue, otherwise
 	 * we may run into long bio clone chain during suspend and OOM could
@@ -1036,12 +1034,7 @@ static void dm_io_complete(struct dm_io *io)
 	 * Also flush data dm_io won't be marked as DM_IO_WAS_SPLIT, so they
 	 * also aren't handled via the first stage requeue.
 	 */
-	if (dm_io_flagged(io, DM_IO_WAS_SPLIT))
-		first_requeue = true;
-	else
-		first_requeue = false;
-
-	__dm_io_complete(io, first_requeue);
+	__dm_io_complete(io, dm_io_flagged(io, DM_IO_WAS_SPLIT));
 }

 /*