dm error: Add support for zoned block devices
dm-error is used in several test cases in the xfstests test suite to check the handling of IO errors in file systems. However, with several file systems getting native support for zoned block devices (e.g. btrfs and f2fs), dm-error's lack of zoned block device support creates problems as the file system attempts executing zone commands (e.g. a zone append operation) against a dm-error non-zoned block device, which causes various issues in the block layer (e.g. WARN_ON triggers). This commit adds support for zoned block devices to dm-error, allowing a DM device table containing an error target to be exposed as a zoned block device (if all targets have a compatible zoned model support and mapping). This is done as follows: 1) Allow passing 2 arguments to an error target, similar to dm-linear: a backing device and a start sector. These arguments are optional and dm-error retains its characteristics if the arguments are not specified. 2) Implement the iterate_devices method so that dm-core can normally check the zone support and restrictions (e.g. zone alignment of the targets). When the backing device arguments are not specified, the iterate_devices method never calls the fn() argument. When no backing device is specified, as before, we assume that the DM device is not zoned. When the backing device arguments are specified, the zoned model of the DM device will depend on the backing device type: - If the backing device is zoned and its model and mapping are compatible with other targets of the device, the resulting device will be zoned, with the dm-error mapped portion always returning errors (similar to the default non-zoned case). - If the backing device is not zoned, then the DM device will not be either. This zone support for dm-error requires the definition of a functional report_zones operation so that dm_revalidate_zones() can operate correctly and resources for emulating zone append operations can be initialized. 
This is necessary for cases where dm-error is used to partially map a device and have an overall correct handling of zone append. This means that dm-error does not fail report zones operations. Two changes that are not obvious are included to avoid issues: 1) dm_table_supports_zoned_model() is changed to directly check if the backing device of a wildcard target (= dm-error target) is zoned. Otherwise, we wouldn't be able to catch the invalid setup of dm-error without a backing device (non zoned case) being combined with zoned targets. 2) dm_table_supports_dax() is modified to return false if the wildcard target is found. Otherwise, when dm-error is set without a backing device, we end up with a NULL pointer dereference in set_dax_synchronous (dax_dev is NULL). This is consistent with the current behavior because dm_table_supports_dax() always returned false for targets that do not define the iterate_devices method. Signed-off-by: Damien Le Moal <dlemoal@kernel.org> Tested-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Mike Snitzer <snitzer@kernel.org>
This commit is contained in:
parent
70bbeb29fa
commit
a951104333
|
@ -844,7 +844,8 @@ static bool dm_table_supports_dax(struct dm_table *t,
|
|||
if (!ti->type->direct_access)
|
||||
return false;
|
||||
|
||||
if (!ti->type->iterate_devices ||
|
||||
if (dm_target_is_wildcard(ti->type) ||
|
||||
!ti->type->iterate_devices ||
|
||||
ti->type->iterate_devices(ti, iterate_fn, NULL))
|
||||
return false;
|
||||
}
|
||||
|
@ -1587,6 +1588,14 @@ static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
|
|||
return blk_queue_zoned_model(q) != *zoned_model;
|
||||
}
|
||||
|
||||
/*
 * iterate_devices callout: returns non-zero (true) if the backing device
 * @dev is a zoned block device, i.e. its zoned model is anything other
 * than BLK_ZONED_NONE. Used by dm_table_supports_zoned_model() to probe
 * the backing device of a wildcard (dm-error) target directly.
 */
static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
				 sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return blk_queue_zoned_model(q) != BLK_ZONED_NONE;
}
|
||||
|
||||
/*
|
||||
* Check the device zoned model based on the target feature flag. If the target
|
||||
* has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
|
||||
|
@ -1600,6 +1609,18 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
|
|||
for (unsigned int i = 0; i < t->num_targets; i++) {
|
||||
struct dm_target *ti = dm_table_get_target(t, i);
|
||||
|
||||
/*
|
||||
* For the wildcard target (dm-error), if we do not have a
|
||||
* backing device, we must always return false. If we have a
|
||||
* backing device, the result must depend on checking zoned
|
||||
* model, like for any other target. So for this, check directly
|
||||
* if the target backing device is zoned as we get "false" when
|
||||
* dm-error was set without a backing device.
|
||||
*/
|
||||
if (dm_target_is_wildcard(ti->type) &&
|
||||
!ti->type->iterate_devices(ti, device_is_zoned_model, NULL))
|
||||
return false;
|
||||
|
||||
if (dm_target_supports_zoned_hm(ti->type)) {
|
||||
if (!ti->type->iterate_devices ||
|
||||
ti->type->iterate_devices(ti, device_not_zoned_model,
|
||||
|
|
|
@ -116,8 +116,62 @@ EXPORT_SYMBOL(dm_unregister_target);
|
|||
* io-err: always fails an io, useful for bringing
|
||||
* up LVs that have holes in them.
|
||||
*/
|
||||
/*
 * Per-target context for dm-error when the optional backing device
 * arguments are given (same format as dm-linear): @dev is the backing
 * device and @start the mapping start sector on it. tt->private stays
 * NULL when no backing device arguments were supplied.
 */
struct io_err_c {
	struct dm_dev *dev;
	sector_t start;
};
|
||||
|
||||
/*
 * Parse the optional dm-error target arguments: a backing device path
 * (args[0]) and a start sector on that device (args[1]), mirroring the
 * dm-linear argument format. On success the allocated io_err_c context
 * is stored in tt->private (released in the target destructor).
 * Returns 0 on success or a negative errno with tt->error set.
 */
static int io_err_get_args(struct dm_target *tt, unsigned int argc, char **args)
{
	unsigned long long start;
	struct io_err_c *ioec;
	char dummy;
	int ret;

	ioec = kmalloc(sizeof(*ioec), GFP_KERNEL);
	if (!ioec) {
		tt->error = "Cannot allocate io_err context";
		return -ENOMEM;
	}

	ret = -EINVAL;
	/*
	 * The "%llu%c" + dummy idiom rejects trailing garbage after the
	 * number; the cast comparison catches values that do not fit in
	 * sector_t.
	 */
	if (sscanf(args[1], "%llu%c", &start, &dummy) != 1 ||
	    start != (sector_t)start) {
		tt->error = "Invalid device sector";
		goto bad;
	}
	ioec->start = start;

	ret = dm_get_device(tt, args[0], dm_table_get_mode(tt->table), &ioec->dev);
	if (ret) {
		tt->error = "Device lookup failed";
		goto bad;
	}

	tt->private = ioec;

	return 0;

bad:
	kfree(ioec);

	return ret;
}
|
||||
|
||||
static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args)
|
||||
{
|
||||
/*
|
||||
* If we have arguments, assume it is the path to the backing
|
||||
* block device and its mapping start sector (same as dm-linear).
|
||||
* In this case, get the device so that we can get its limits.
|
||||
*/
|
||||
if (argc == 2) {
|
||||
int ret = io_err_get_args(tt, argc, args);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return error for discards instead of -EOPNOTSUPP
|
||||
*/
|
||||
|
@ -129,7 +183,12 @@ static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args)
|
|||
|
||||
/*
 * Destructor: drop the backing device reference and free the context if
 * the target was constructed with backing device arguments. tt->private
 * is NULL when dm-error was set up without a backing device, in which
 * case there is nothing to release.
 */
static void io_err_dtr(struct dm_target *tt)
{
	struct io_err_c *ioec = tt->private;

	if (ioec) {
		dm_put_device(tt, ioec->dev);
		kfree(ioec);
	}
}
|
||||
|
||||
static int io_err_map(struct dm_target *tt, struct bio *bio)
|
||||
|
@ -149,6 +208,45 @@ static void io_err_release_clone_rq(struct request *clone,
|
|||
{
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_ZONED
/* Remap a target-relative sector to the backing device sector space. */
static sector_t io_err_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct io_err_c *ioec = ti->private;

	return ioec->start + dm_target_offset(ti, bi_sector);
}

/*
 * Report zones of the backing device. dm-error must not fail report
 * zones operations so that dm_revalidate_zones() can operate correctly
 * when dm-error is used to partially map a zoned device.
 */
static int io_err_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct io_err_c *ioec = ti->private;

	/*
	 * This should never be called when we do not have a backing device
	 * as that means the target is not a zoned one.
	 */
	if (WARN_ON_ONCE(!ioec))
		return -EIO;

	return dm_report_zones(ioec->dev->bdev, ioec->start,
			       io_err_map_sector(ti, args->next_sector),
			       args, nr_zones);
}
#else
#define io_err_report_zones NULL
#endif
|
||||
|
||||
/*
 * iterate_devices method: invoke @fn on the backing device, if one was
 * specified. When dm-error was set up without backing device arguments
 * (ti->private is NULL), @fn is never called, so dm core sees no
 * devices for this target and, as before, treats it as non-zoned.
 */
static int io_err_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct io_err_c *ioec = ti->private;

	if (!ioec)
		return 0;

	return fn(ti, ioec->dev, ioec->start, ti->len, data);
}
|
||||
|
||||
static void io_err_io_hints(struct dm_target *ti, struct queue_limits *limits)
|
||||
{
|
||||
limits->max_discard_sectors = UINT_MAX;
|
||||
|
@ -165,15 +263,17 @@ static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
|
|||
|
||||
/*
 * The "error" (dm-error) target. DM_TARGET_WILDCARD lets it replace any
 * target type; DM_TARGET_ZONED_HM advertises support for host-managed
 * zoned backing devices. report_zones is only functional when
 * CONFIG_BLK_DEV_ZONED is set (it is defined to NULL otherwise).
 */
static struct target_type error_target = {
	.name = "error",
	.version = {1, 7, 0},
	.features = DM_TARGET_WILDCARD | DM_TARGET_ZONED_HM,
	.ctr  = io_err_ctr,
	.dtr  = io_err_dtr,
	.map  = io_err_map,
	.clone_and_map_rq = io_err_clone_and_map_rq,
	.release_clone_rq = io_err_release_clone_rq,
	.iterate_devices = io_err_iterate_devices,
	.io_hints = io_err_io_hints,
	.direct_access = io_err_dax_direct_access,
	.report_zones = io_err_report_zones,
};
|
||||
|
||||
int __init dm_target_init(void)
|
||||
|
|
Loading…
Reference in New Issue