| Message ID | 20240215070300.2200308-9-hch@lst.de |
|---|---|
| State | New |
| Series | [01/17] ubd: pass queue_limits to blk_mq_alloc_disk |
On Thu, Feb 15, 2024 at 8:03 AM Christoph Hellwig <hch@lst.de> wrote:
>
> Pass the limits rnbd-clt imposes directly to blk_mq_alloc_disk instead
> of setting them one at a time.
>
> While at it don't set an explicit number of discard segments, as 1 is
> the default (which most drivers rely on).
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>

lgtm, thx!
Acked-by: Jack Wang <jinpu.wang@ionos.com>
```diff
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index d51be4f2df61a3..b7ffe03c61606d 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1329,43 +1329,6 @@ static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev)
 	}
 }
 
-static void setup_request_queue(struct rnbd_clt_dev *dev,
-				struct rnbd_msg_open_rsp *rsp)
-{
-	blk_queue_logical_block_size(dev->queue,
-				     le16_to_cpu(rsp->logical_block_size));
-	blk_queue_physical_block_size(dev->queue,
-				      le16_to_cpu(rsp->physical_block_size));
-	blk_queue_max_hw_sectors(dev->queue,
-				 dev->sess->max_io_size / SECTOR_SIZE);
-
-	/*
-	 * we don't support discards to "discontiguous" segments
-	 * in on request
-	 */
-	blk_queue_max_discard_segments(dev->queue, 1);
-
-	blk_queue_max_discard_sectors(dev->queue,
-				      le32_to_cpu(rsp->max_discard_sectors));
-	dev->queue->limits.discard_granularity =
-					le32_to_cpu(rsp->discard_granularity);
-	dev->queue->limits.discard_alignment =
-					le32_to_cpu(rsp->discard_alignment);
-	if (le16_to_cpu(rsp->secure_discard))
-		blk_queue_max_secure_erase_sectors(dev->queue,
-				le32_to_cpu(rsp->max_discard_sectors));
-	blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
-	blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
-	blk_queue_max_segments(dev->queue, dev->sess->max_segments);
-	blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
-	blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
-	blk_queue_write_cache(dev->queue,
-			      !!(rsp->cache_policy & RNBD_WRITEBACK),
-			      !!(rsp->cache_policy & RNBD_FUA));
-	blk_queue_max_write_zeroes_sectors(dev->queue,
-		le32_to_cpu(rsp->max_write_zeroes_sectors));
-}
-
 static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev,
 				   struct rnbd_msg_open_rsp *rsp, int idx)
 {
@@ -1403,18 +1366,41 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev,
 static int rnbd_client_setup_device(struct rnbd_clt_dev *dev,
 				    struct rnbd_msg_open_rsp *rsp)
 {
+	struct queue_limits lim = {
+		.logical_block_size	= le16_to_cpu(rsp->logical_block_size),
+		.physical_block_size	= le16_to_cpu(rsp->physical_block_size),
+		.io_opt			= dev->sess->max_io_size,
+		.max_hw_sectors		= dev->sess->max_io_size / SECTOR_SIZE,
+		.max_hw_discard_sectors	= le32_to_cpu(rsp->max_discard_sectors),
+		.discard_granularity	= le32_to_cpu(rsp->discard_granularity),
+		.discard_alignment	= le32_to_cpu(rsp->discard_alignment),
+		.max_segments		= dev->sess->max_segments,
+		.virt_boundary_mask	= SZ_4K - 1,
+		.max_write_zeroes_sectors =
+			le32_to_cpu(rsp->max_write_zeroes_sectors),
+	};
 	int idx = dev->clt_device_id;
 
 	dev->size = le64_to_cpu(rsp->nsectors) *
 			le16_to_cpu(rsp->logical_block_size);
 
-	dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, NULL, dev);
+	if (rsp->secure_discard) {
+		lim.max_secure_erase_sectors =
+			le32_to_cpu(rsp->max_discard_sectors);
+	}
+
+	dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, &lim, dev);
 	if (IS_ERR(dev->gd))
 		return PTR_ERR(dev->gd);
 	dev->queue = dev->gd->queue;
 	rnbd_init_mq_hw_queues(dev);
 
-	setup_request_queue(dev, rsp);
+	blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
+	blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
+	blk_queue_write_cache(dev->queue,
+			      !!(rsp->cache_policy & RNBD_WRITEBACK),
+			      !!(rsp->cache_policy & RNBD_FUA));
+
 	return rnbd_clt_setup_gen_disk(dev, rsp, idx);
 }
```
Pass the limits rnbd-clt imposes directly to blk_mq_alloc_disk instead
of setting them one at a time.

While at it don't set an explicit number of discard segments, as 1 is
the default (which most drivers rely on).

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/block/rnbd/rnbd-clt.c | 64 ++++++++++++++---------------------
 1 file changed, 25 insertions(+), 39 deletions(-)
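For anyone converting another driver the same way, the general shape of the pattern is sketched below. This is a minimal, hypothetical sketch: `example_alloc_disk`, its parameters, and the literal limit values are made up for illustration; only `struct queue_limits` and the `blk_mq_alloc_disk()` call itself come from the patch above.

```c
#include <linux/blk-mq.h>
#include <linux/blkdev.h>

/*
 * Hypothetical driver sketch of the queue_limits-at-allocation pattern:
 * describe the device's limits up front in a struct queue_limits and
 * hand them to blk_mq_alloc_disk(), instead of poking them into the
 * live queue one blk_queue_*() call at a time after allocation.
 */
static struct gendisk *example_alloc_disk(struct blk_mq_tag_set *set,
					  void *drvdata)
{
	struct queue_limits lim = {
		.logical_block_size	= 512,	/* made-up example values */
		.max_hw_sectors		= 256,
		.max_segments		= 32,
	};

	/*
	 * Limits left zero here (e.g. max_discard_segments) are given
	 * block-layer defaults when the queue is created, which is why
	 * the patch can drop the explicit "1 discard segment" setting.
	 * Returns a gendisk or an ERR_PTR() to unwrap with PTR_ERR(),
	 * exactly as the rnbd code above does.
	 */
	return blk_mq_alloc_disk(set, &lim, drvdata);
}
```

Conditional limits fit the same model: fill them into the stack-local `lim` before the allocation call, as the patch does for `rsp->secure_discard`.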