author    Linus Torvalds <torvalds@linux-foundation.org>  2025-12-20 09:48:56 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2025-12-20 09:48:56 -0800
commit    d8ba32c5a460837a5f0b9619dac99fafb6faef07 (patch)
tree      d02365c1172c05a523f527f2e64ce1ffbac3cf9c /block
parent    d245b2e53e816716637be508c90190ef471457c7 (diff)
parent    af65faf34f6e9919bdd2912770d25d2a73cbcc7c (diff)
Merge tag 'block-6.19-20251218' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux
Pull block fixes from Jens Axboe:

 - ublk selftests for missing coverage

 - two fixes for the block integrity code

 - fix for the newly added PR read keys ioctl, limiting the memory that
   can be allocated

 - work-around for a deadlock that can occur with ublk, where partition
   scanning ends up recursing back into file closure, which needs the
   same mutex grabbed. Not the prettiest thing in the world, but an
   acceptable work-around until we can eliminate the reliance on
   disk->open_mutex for this

 - fix for a race between enabling writeback throttling and new IO
   submissions

 - move a bit of bio flag handling code. No changes, but needed for a
   patchset for a future kernel

 - fix for an init-time ID leak failure in rnbd

 - loop/zloop state check fix

* tag 'block-6.19-20251218' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
  block: validate interval_exp integrity limit
  block: validate pi_offset integrity limit
  block: rnbd-clt: Fix leaked ID in init_dev()
  ublk: fix deadlock when reading partition table
  block: add allocation size check in blkdev_pr_read_keys()
  Documentation: admin-guide: blockdev: replace zone_capacity with zone_capacity_mb when creating devices
  zloop: use READ_ONCE() to read lo->lo_state in queue_rq path
  loop: use READ_ONCE() to read lo->lo_state without locking
  block: fix race between wbt_enable_default and IO submission
  selftests: ublk: add user copy test cases
  selftests: ublk: add support for user copy to kublk
  selftests: ublk: forbid multiple data copy modes
  selftests: ublk: don't share backing files between ublk servers
  selftests: ublk: use auto_zc for PER_IO_DAEMON tests in stress_04
  selftests: ublk: fix fio arguments in run_io_and_recover()
  selftests: ublk: remove unused ios map in seq_io.bt
  selftests: ublk: correct last_rw map type in seq_io.bt
  selftests: ublk: fix overflow in ublk_queue_auto_zc_fallback()
  block: move around bio flagging helpers
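A side note on the loop/zloop items in the list above: they switch lockless readers of lo->lo_state to READ_ONCE(). The sketch below is not loop driver code; it uses an invented struct and a simplified userspace stand-in for the kernel's READ_ONCE()/WRITE_ONCE() macros, purely to illustrate why a lockless load of a state field gets annotated (it keeps the compiler from tearing, caching, or re-reading the value).

/*
 * Illustrative only -- not loop driver code. A simplified userspace
 * stand-in for READ_ONCE()/WRITE_ONCE() (volatile access so the compiler
 * cannot tear, cache, or re-read the load), applied to a made-up
 * "lo_state" field that is written under a lock but read without one.
 */
#include <stdio.h>

#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

enum lo_state { LO_UNBOUND, LO_BOUND, LO_RUNDOWN };

struct fake_lo {
	enum lo_state lo_state;	/* written under a lock, read locklessly */
};

/* Hot path: peek at the state without taking the lock. */
static int fake_queue_rq(struct fake_lo *lo)
{
	if (READ_ONCE(lo->lo_state) != LO_BOUND)
		return -5;	/* -EIO in the kernel */
	return 0;		/* would dispatch the request here */
}

int main(void)
{
	struct fake_lo lo = { .lo_state = LO_UNBOUND };

	printf("unbound: %d\n", fake_queue_rq(&lo));
	WRITE_ONCE(lo.lo_state, LO_BOUND);
	printf("bound:   %d\n", fake_queue_rq(&lo));
	return 0;
}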
Diffstat (limited to 'block')
-rw-r--r--  block/bfq-iosched.c    2
-rw-r--r--  block/blk-settings.c  14
-rw-r--r--  block/blk-sysfs.c      2
-rw-r--r--  block/blk-wbt.c       20
-rw-r--r--  block/blk-wbt.h        5
-rw-r--r--  block/elevator.c       4
-rw-r--r--  block/elevator.h       1
-rw-r--r--  block/ioctl.c          9
8 files changed, 37 insertions, 20 deletions
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 4a8d3d96bfe4..6e54b1d3d8bc 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -7181,7 +7181,7 @@ static void bfq_exit_queue(struct elevator_queue *e)
blk_stat_disable_accounting(bfqd->queue);
blk_queue_flag_clear(QUEUE_FLAG_DISABLE_WBT_DEF, bfqd->queue);
- set_bit(ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT, &e->flags);
+ wbt_enable_default(bfqd->queue->disk);
kfree(bfqd);
}
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 51401f08ce05..a9e65dc090da 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -161,10 +161,9 @@ static int blk_validate_integrity_limits(struct queue_limits *lim)
return -EINVAL;
}
- if (bi->pi_tuple_size > bi->metadata_size) {
- pr_warn("pi_tuple_size (%u) exceeds metadata_size (%u)\n",
- bi->pi_tuple_size,
- bi->metadata_size);
+ if (bi->pi_offset + bi->pi_tuple_size > bi->metadata_size) {
+ pr_warn("pi_offset (%u) + pi_tuple_size (%u) exceeds metadata_size (%u)\n",
+ bi->pi_offset, bi->pi_tuple_size, bi->metadata_size);
return -EINVAL;
}
@@ -194,8 +193,13 @@ static int blk_validate_integrity_limits(struct queue_limits *lim)
break;
}
- if (!bi->interval_exp)
+ if (!bi->interval_exp) {
bi->interval_exp = ilog2(lim->logical_block_size);
+ } else if (bi->interval_exp < SECTOR_SHIFT ||
+ bi->interval_exp > ilog2(lim->logical_block_size)) {
+ pr_warn("invalid interval_exp %u\n", bi->interval_exp);
+ return -EINVAL;
+ }
/*
* The PI generation / validation helpers do not expect intervals to
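Purely for illustration of the two checks added in the blk-settings.c hunks above: they are self-contained inequalities, so they translate directly into the standalone sketch below. The struct, helper, and example values are made up in place of struct queue_limits / struct blk_integrity; only the bounds mirror the patch.

/*
 * Illustrative restatement of the two new integrity checks. Everything
 * here is invented except the inequalities themselves.
 */
#include <stdio.h>

#define SECTOR_SHIFT 9

struct fake_integrity {
	unsigned int metadata_size;
	unsigned int pi_offset;
	unsigned int pi_tuple_size;
	unsigned int interval_exp;
};

/* Integer log2 for power-of-two block sizes (e.g. 512 -> 9, 4096 -> 12). */
static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static int validate(const struct fake_integrity *bi,
		    unsigned int logical_block_size)
{
	/* The PI tuple, at its offset, must fit inside the metadata area. */
	if (bi->pi_offset + bi->pi_tuple_size > bi->metadata_size)
		return -1;

	/*
	 * In the kernel a zero interval_exp falls back to the logical block
	 * size; here we only check the non-zero case against the same bounds.
	 */
	if (bi->interval_exp &&
	    (bi->interval_exp < SECTOR_SHIFT ||
	     bi->interval_exp > ilog2_u32(logical_block_size)))
		return -1;

	return 0;
}

int main(void)
{
	struct fake_integrity ok  = { 16, 8, 8, 12 };
	struct fake_integrity bad = { 16, 12, 8, 12 };	/* 12 + 8 > 16 */

	printf("ok:  %d\n", validate(&ok, 4096));
	printf("bad: %d\n", validate(&bad, 4096));
	return 0;
}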
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 8684c57498cc..e0a70d26972b 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -932,7 +932,7 @@ int blk_register_queue(struct gendisk *disk)
elevator_set_default(q);
blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
- wbt_enable_default(disk);
+ wbt_init_enable_default(disk);
/* Now everything is ready and send out KOBJ_ADD uevent */
kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index eb8037bae0bd..0974875f77bd 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -699,7 +699,7 @@ static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
/*
* Enable wbt if defaults are configured that way
*/
-void wbt_enable_default(struct gendisk *disk)
+static bool __wbt_enable_default(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
struct rq_qos *rqos;
@@ -716,19 +716,31 @@ void wbt_enable_default(struct gendisk *disk)
if (enable && RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
mutex_unlock(&disk->rqos_state_mutex);
- return;
+ return false;
}
mutex_unlock(&disk->rqos_state_mutex);
/* Queue not registered? Maybe shutting down... */
if (!blk_queue_registered(q))
- return;
+ return false;
if (queue_is_mq(q) && enable)
- wbt_init(disk);
+ return true;
+ return false;
+}
+
+void wbt_enable_default(struct gendisk *disk)
+{
+ __wbt_enable_default(disk);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);
+void wbt_init_enable_default(struct gendisk *disk)
+{
+ if (__wbt_enable_default(disk))
+ WARN_ON_ONCE(wbt_init(disk));
+}
+
u64 wbt_default_latency_nsec(struct request_queue *q)
{
/*
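A note on the shape of the blk-wbt.c change above: the old wbt_enable_default() is split into a predicate (__wbt_enable_default(), which only re-enables an already-attached instance or reports that initialization is wanted) and a registration-only wrapper (wbt_init_enable_default()) that becomes the sole remaining caller of wbt_init(). The standalone sketch below mimics that split; the struct and function names in it are invented.

/*
 * Hypothetical, self-contained illustration of the refactor pattern:
 * one helper answers "does this need enabling?", and only the
 * registration-time wrapper performs the heavyweight initialization.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_disk {
	bool throttle_attached;	/* stand-in for an existing rq_qos policy */
	bool throttle_enabled;
	bool registered;
};

/* Decide; re-enable an existing instance, but never initialize here. */
static bool want_default_throttle(struct fake_disk *d)
{
	if (d->throttle_attached) {
		d->throttle_enabled = true;	/* just flip the state back on */
		return false;
	}
	if (!d->registered)
		return false;			/* queue going away, do nothing */
	return true;				/* caller may initialize */
}

/* Non-registration callers: enable only, like wbt_enable_default(). */
static void enable_default(struct fake_disk *d)
{
	want_default_throttle(d);
}

/* Registration path: the only place allowed to initialize. */
static void init_enable_default(struct fake_disk *d)
{
	if (want_default_throttle(d)) {
		d->throttle_attached = true;
		d->throttle_enabled = true;
	}
}

int main(void)
{
	struct fake_disk d = { .registered = true };

	enable_default(&d);		/* no init from this path */
	printf("after enable_default: attached=%d\n", d.throttle_attached);
	init_enable_default(&d);	/* registration does the init */
	printf("after init_enable_default: attached=%d enabled=%d\n",
	       d.throttle_attached, d.throttle_enabled);
	return 0;
}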
diff --git a/block/blk-wbt.h b/block/blk-wbt.h
index e5fc653b9b76..925f22475738 100644
--- a/block/blk-wbt.h
+++ b/block/blk-wbt.h
@@ -5,6 +5,7 @@
#ifdef CONFIG_BLK_WBT
int wbt_init(struct gendisk *disk);
+void wbt_init_enable_default(struct gendisk *disk);
void wbt_disable_default(struct gendisk *disk);
void wbt_enable_default(struct gendisk *disk);
@@ -16,6 +17,10 @@ u64 wbt_default_latency_nsec(struct request_queue *);
#else
+static inline void wbt_init_enable_default(struct gendisk *disk)
+{
+}
+
static inline void wbt_disable_default(struct gendisk *disk)
{
}
diff --git a/block/elevator.c b/block/elevator.c
index 5b37ef44f52d..a2f8b2251dc6 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -633,14 +633,10 @@ static int elevator_change_done(struct request_queue *q,
.et = ctx->old->et,
.data = ctx->old->elevator_data
};
- bool enable_wbt = test_bit(ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT,
- &ctx->old->flags);
elv_unregister_queue(q, ctx->old);
blk_mq_free_sched_res(&res, ctx->old->type, q->tag_set);
kobject_put(&ctx->old->kobj);
- if (enable_wbt)
- wbt_enable_default(q->disk);
}
if (ctx->new) {
ret = elv_register_queue(q, ctx->new, !ctx->no_uevent);
diff --git a/block/elevator.h b/block/elevator.h
index a9d092c5a9e8..3eb32516be0b 100644
--- a/block/elevator.h
+++ b/block/elevator.h
@@ -156,7 +156,6 @@ struct elevator_queue
#define ELEVATOR_FLAG_REGISTERED 0
#define ELEVATOR_FLAG_DYING 1
-#define ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT 2
/*
* block elevator interface
diff --git a/block/ioctl.c b/block/ioctl.c
index 61feed686418..344478348a54 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -442,11 +442,12 @@ static int blkdev_pr_read_keys(struct block_device *bdev, blk_mode_t mode,
if (copy_from_user(&read_keys, arg, sizeof(read_keys)))
return -EFAULT;
- keys_info_len = struct_size(keys_info, keys, read_keys.num_keys);
- if (keys_info_len == SIZE_MAX)
+ if (read_keys.num_keys > PR_KEYS_MAX)
return -EINVAL;
- keys_info = kzalloc(keys_info_len, GFP_KERNEL);
+ keys_info_len = struct_size(keys_info, keys, read_keys.num_keys);
+
+ keys_info = kvzalloc(keys_info_len, GFP_KERNEL);
if (!keys_info)
return -ENOMEM;
@@ -473,7 +474,7 @@ static int blkdev_pr_read_keys(struct block_device *bdev, blk_mode_t mode,
if (copy_to_user(arg, &read_keys, sizeof(read_keys)))
ret = -EFAULT;
out:
- kfree(keys_info);
+ kvfree(keys_info);
return ret;
}
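For the blkdev_pr_read_keys() change just above, a userspace sketch of the same allocation discipline: cap the user-supplied count before sizing the flexible array, then pair the possibly-large allocation with the matching free (kvzalloc()/kvfree() in the kernel, calloc()/free() here). FAKE_KEYS_MAX, the struct layout, and the helper name are all invented for the sketch; only the ordering of the checks follows the patch.

/*
 * Illustrative only. FAKE_KEYS_MAX and the struct layout are invented;
 * the point is "cap first, then size the flexible array, then allocate".
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FAKE_KEYS_MAX 1024	/* arbitrary cap for the sketch */

struct fake_keys_info {
	uint32_t num_keys;
	uint64_t keys[];	/* flexible array sized from user input */
};

static struct fake_keys_info *alloc_keys_info(uint32_t num_keys)
{
	size_t len;

	/* Bound the user-controlled count before doing any arithmetic. */
	if (num_keys > FAKE_KEYS_MAX)
		return NULL;

	/* Equivalent of struct_size(): header plus num_keys elements. */
	len = sizeof(struct fake_keys_info) +
	      (size_t)num_keys * sizeof(uint64_t);

	return calloc(1, len);	/* kvzalloc() in the kernel version */
}

int main(void)
{
	struct fake_keys_info *info = alloc_keys_info(16);

	if (!info) {
		fprintf(stderr, "allocation rejected or failed\n");
		return 1;
	}
	info->num_keys = 16;
	printf("allocated room for %u keys\n", info->num_keys);
	free(info);		/* kvfree() in the kernel version */
	return 0;
}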