drbd: atomically update queue limits in drbd_reconsider_queue_parameters
Switch drbd_reconsider_queue_parameters to set up the queue parameters in an on-stack queue_limits structure and apply them atomically. Remove various helpers that have become so trivial that they can be folded into drbd_reconsider_queue_parameters.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20240305134041.137006-8-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit e6dfe748f0 (parent 5eaee6e9c8)
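For readers who have not seen the limits API this series converts drivers to: rather than poking q->limits (or calling individual blk_queue_* setters) field by field, a driver takes a snapshot of the limits, edits the copy on the stack, and commits the whole set in one validated step, so concurrent readers never observe a half-updated configuration. The sketch below only illustrates that pattern and is not drbd code; queue_limits_start_update() and queue_limits_commit_update() are the real block layer helpers the patch uses, while example_update_limits() and the particular values are made up for the example.

#include <linux/blkdev.h>

/*
 * Illustrative only: queue_limits_start_update() locks the queue's limits
 * and returns a copy; queue_limits_commit_update() validates the edited
 * copy, applies it, and drops the lock.  Returns 0 on success.
 */
static int example_update_limits(struct request_queue *q)
{
        struct queue_limits lim = queue_limits_start_update(q);

        lim.max_hw_sectors = 2048;              /* hypothetical 1 MiB cap */
        lim.max_segments = BLK_MAX_SEGMENTS;
        lim.seg_boundary_mask = PAGE_SIZE - 1;

        return queue_limits_commit_update(q, &lim);
}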
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1216,11 +1216,6 @@ static unsigned int drbd_max_peer_bio_size(struct drbd_device *device)
 	return DRBD_MAX_BIO_SIZE;
 }
 
-static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
-{
-	q->limits.discard_granularity = granularity;
-}
-
 static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
 {
 	/* when we introduced REQ_WRITE_SAME support, we also bumped
@@ -1247,62 +1242,6 @@ static bool drbd_discard_supported(struct drbd_connection *connection,
 	return true;
 }
 
-static void decide_on_discard_support(struct drbd_device *device,
-		struct drbd_backing_dev *bdev)
-{
-	struct drbd_connection *connection =
-		first_peer_device(device)->connection;
-	struct request_queue *q = device->rq_queue;
-	unsigned int max_discard_sectors;
-
-	if (!drbd_discard_supported(connection, bdev))
-		goto not_supported;
-
-	/*
-	 * We don't care for the granularity, really.
-	 *
-	 * Stacking limits below should fix it for the local device. Whether or
-	 * not it is a suitable granularity on the remote device is not our
-	 * problem, really. If you care, you need to use devices with similar
-	 * topology on all peers.
-	 */
-	blk_queue_discard_granularity(q, 512);
-	max_discard_sectors = drbd_max_discard_sectors(connection);
-	blk_queue_max_discard_sectors(q, max_discard_sectors);
-	return;
-
-not_supported:
-	blk_queue_discard_granularity(q, 0);
-	blk_queue_max_discard_sectors(q, 0);
-}
-
-static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
-{
-	/* Fixup max_write_zeroes_sectors after blk_stack_limits():
-	 * if we can handle "zeroes" efficiently on the protocol,
-	 * we want to do that, even if our backend does not announce
-	 * max_write_zeroes_sectors itself. */
-	struct drbd_connection *connection = first_peer_device(device)->connection;
-	/* If the peer announces WZEROES support, use it.  Otherwise, rather
-	 * send explicit zeroes than rely on some discard-zeroes-data magic. */
-	if (connection->agreed_features & DRBD_FF_WZEROES)
-		q->limits.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
-	else
-		q->limits.max_write_zeroes_sectors = 0;
-}
-
-static void fixup_discard_support(struct drbd_device *device, struct request_queue *q)
-{
-	unsigned int max_discard = device->rq_queue->limits.max_discard_sectors;
-	unsigned int discard_granularity =
-		device->rq_queue->limits.discard_granularity >> SECTOR_SHIFT;
-
-	if (discard_granularity > max_discard) {
-		blk_queue_discard_granularity(q, 0);
-		blk_queue_max_discard_sectors(q, 0);
-	}
-}
-
 /* This is the workaround for "bio would need to, but cannot, be split" */
 static unsigned int drbd_backing_dev_max_segments(struct drbd_device *device)
 {
@@ -1320,8 +1259,11 @@ static unsigned int drbd_backing_dev_max_segments(struct drbd_device *device)
 void drbd_reconsider_queue_parameters(struct drbd_device *device,
 		struct drbd_backing_dev *bdev, struct o_qlim *o)
 {
+	struct drbd_connection *connection =
+		first_peer_device(device)->connection;
 	struct request_queue * const q = device->rq_queue;
 	unsigned int now = queue_max_hw_sectors(q) << 9;
+	struct queue_limits lim;
 	struct request_queue *b = NULL;
 	unsigned int new;
 
@@ -1348,24 +1290,55 @@ void drbd_reconsider_queue_parameters(struct drbd_device *device,
 		drbd_info(device, "max BIO size = %u\n", new);
 	}
 
+	lim = queue_limits_start_update(q);
 	if (bdev) {
-		blk_set_stacking_limits(&q->limits);
-		blk_queue_max_segments(q,
-			drbd_backing_dev_max_segments(device));
+		blk_set_stacking_limits(&lim);
+		lim.max_segments = drbd_backing_dev_max_segments(device);
 	} else {
-		blk_queue_max_segments(q, BLK_MAX_SEGMENTS);
+		lim.max_segments = BLK_MAX_SEGMENTS;
 	}
 
-	blk_queue_max_hw_sectors(q, new >> SECTOR_SHIFT);
-	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
-	decide_on_discard_support(device, bdev);
+	lim.max_hw_sectors = new >> SECTOR_SHIFT;
+	lim.seg_boundary_mask = PAGE_SIZE - 1;
 
-	if (bdev) {
-		blk_stack_limits(&q->limits, &b->limits, 0);
-		disk_update_readahead(device->vdisk);
-	}
-	fixup_write_zeroes(device, q);
-	fixup_discard_support(device, q);
+	/*
+	 * We don't care for the granularity, really.
+	 *
+	 * Stacking limits below should fix it for the local device. Whether or
+	 * not it is a suitable granularity on the remote device is not our
+	 * problem, really. If you care, you need to use devices with similar
+	 * topology on all peers.
+	 */
+	if (drbd_discard_supported(connection, bdev)) {
+		lim.discard_granularity = 512;
+		lim.max_hw_discard_sectors =
+			drbd_max_discard_sectors(connection);
+	} else {
+		lim.discard_granularity = 0;
+		lim.max_hw_discard_sectors = 0;
+	}
+
+	if (bdev)
+		blk_stack_limits(&lim, &b->limits, 0);
+
+	/*
+	 * If we can handle "zeroes" efficiently on the protocol, we want to do
+	 * that, even if our backend does not announce max_write_zeroes_sectors
+	 * itself.
+	 */
+	if (connection->agreed_features & DRBD_FF_WZEROES)
+		lim.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
+	else
+		lim.max_write_zeroes_sectors = 0;
+
+	if ((lim.discard_granularity >> SECTOR_SHIFT) >
+	    lim.max_hw_discard_sectors) {
+		lim.discard_granularity = 0;
+		lim.max_hw_discard_sectors = 0;
+	}
+
+	if (queue_limits_commit_update(q, &lim))
+		drbd_err(device, "setting new queue limits failed\n");
 }
 
 /* Starts the worker thread */
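The stacking half of the change follows the same pattern: when a backing device is attached, its limits are folded into the on-stack structure with blk_stack_limits() before the single commit, and the explicit disk_update_readahead() call disappears, presumably because the commit path now propagates the settings that used to require it. Below is a minimal sketch of stacking on top of a lower block device with this API; example_stack_limits() and the parameter names are hypothetical, the block layer helpers are real.

#include <linux/blkdev.h>

/*
 * Hypothetical example: fold a lower device's limits into an on-stack
 * queue_limits and apply everything atomically.
 */
static int example_stack_limits(struct request_queue *q, struct block_device *lower)
{
        struct queue_limits lim = queue_limits_start_update(q);

        /* Start from the permissive stacking defaults, then narrow them
         * down to what the lower device can actually do.
         */
        blk_set_stacking_limits(&lim);
        blk_stack_limits(&lim, &bdev_get_queue(lower)->limits, 0);

        return queue_limits_commit_update(q, &lim);
}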