@@ -98,6 +98,7 @@ static int __init nfhd_init_one(int id, u32 blocks, u32 bsize)
{
struct queue_limits lim = {
.logical_block_size = bsize,
+ .features = BLK_FEAT_ROTATIONAL,
};
struct nfhd_device *dev;
int dev_id = id - NFHD_DEV_OFFSET;
@@ -882,7 +882,6 @@ static int ubd_add(int n, char **error_out)
goto out_cleanup_tags;
}
- blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
disk->major = UBD_MAJOR;
disk->first_minor = n << UBD_SHIFT;
disk->minors = 1 << UBD_SHIFT;
@@ -263,6 +263,9 @@ static const struct proc_ops simdisk_proc_ops = {
static int __init simdisk_setup(struct simdisk *dev, int which,
struct proc_dir_entry *procdir)
{
+ struct queue_limits lim = {
+ .features = BLK_FEAT_ROTATIONAL,
+ };
char tmp[2] = { '0' + which, 0 };
int err;
@@ -271,7 +274,7 @@ static int __init simdisk_setup(struct simdisk *dev, int which,
spin_lock_init(&dev->lock);
dev->users = 0;
- dev->gd = blk_alloc_disk(NULL, NUMA_NO_NODE);
+ dev->gd = blk_alloc_disk(&lim, NUMA_NO_NODE);
if (IS_ERR(dev->gd)) {
err = PTR_ERR(dev->gd);
goto out;
@@ -84,7 +84,6 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(NOMERGES),
QUEUE_FLAG_NAME(SAME_COMP),
QUEUE_FLAG_NAME(FAIL_IO),
- QUEUE_FLAG_NAME(NONROT),
QUEUE_FLAG_NAME(IO_STAT),
QUEUE_FLAG_NAME(NOXMERGES),
QUEUE_FLAG_NAME(ADD_RANDOM),
@@ -263,6 +263,39 @@ static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
return queue_var_show(queue_dma_alignment(q), page);
}
+static ssize_t queue_feature_store(struct request_queue *q, const char *page,
+ size_t count, unsigned int feature)
+{
+ struct queue_limits lim;
+ unsigned long val;
+ ssize_t ret;
+
+ ret = queue_var_store(&val, page, count);
+ if (ret < 0)
+ return ret;
+
+ lim = queue_limits_start_update(q);
+ if (val)
+ lim.features |= feature;
+ else
+ lim.features &= ~feature;
+ ret = queue_limits_commit_update(q, &lim);
+ if (ret)
+ return ret;
+ return count;
+}
+
+#define QUEUE_SYSFS_FEATURE(_name, _feature) \
+static ssize_t queue_##_name##_show(struct request_queue *q, char *page) \
+{ \
+ return sprintf(page, "%u\n", !!(q->limits.features & _feature)); \
+} \
+static ssize_t queue_##_name##_store(struct request_queue *q, \
+ const char *page, size_t count) \
+{ \
+ return queue_feature_store(q, page, count, _feature); \
+}
+
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \
static ssize_t \
queue_##name##_show(struct request_queue *q, char *page) \
@@ -289,7 +322,7 @@ queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
return ret; \
}
-QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
+QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
@@ -526,7 +559,7 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
.show = queue_logical_block_size_show,
};
-QUEUE_RW_ENTRY(queue_nonrot, "rotational");
+QUEUE_RW_ENTRY(queue_rotational, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
@@ -624,7 +657,7 @@ static struct attribute *queue_attrs[] = {
&queue_write_zeroes_max_entry.attr,
&queue_zone_append_max_entry.attr,
&queue_zone_write_granularity_entry.attr,
- &queue_nonrot_entry.attr,
+ &queue_rotational_entry.attr,
&queue_zoned_entry.attr,
&queue_nr_zones_entry.attr,
&queue_max_open_zones_entry.attr,
@@ -1776,10 +1776,13 @@ static const struct blk_mq_ops amiflop_mq_ops = {
static int fd_alloc_disk(int drive, int system)
{
+ struct queue_limits lim = {
+ .features = BLK_FEAT_ROTATIONAL,
+ };
struct gendisk *disk;
int err;
- disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL, NULL);
+ disk = blk_mq_alloc_disk(&unit[drive].tag_set, &lim, NULL);
if (IS_ERR(disk))
return PTR_ERR(disk);
@@ -337,6 +337,7 @@ aoeblk_gdalloc(void *vp)
struct queue_limits lim = {
.max_hw_sectors = aoe_maxsectors,
.io_opt = SZ_2M,
+ .features = BLK_FEAT_ROTATIONAL,
};
ulong flags;
int late = 0;
@@ -1992,9 +1992,12 @@ static const struct blk_mq_ops ataflop_mq_ops = {
static int ataflop_alloc_disk(unsigned int drive, unsigned int type)
{
+ struct queue_limits lim = {
+ .features = BLK_FEAT_ROTATIONAL,
+ };
struct gendisk *disk;
- disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL, NULL);
+ disk = blk_mq_alloc_disk(&unit[drive].tag_set, &lim, NULL);
if (IS_ERR(disk))
return PTR_ERR(disk);
@@ -366,8 +366,6 @@ static int brd_alloc(int i)
strscpy(disk->disk_name, buf, DISK_NAME_LEN);
set_capacity(disk, rd_size * 2);
- /* Tell the block layer that this is not a rotational device */
- blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, disk->queue);
blk_queue_flag_set(QUEUE_FLAG_NOWAIT, disk->queue);
err = add_disk(disk);
@@ -2697,7 +2697,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
* connect.
*/
.max_hw_sectors = DRBD_MAX_BIO_SIZE_SAFE >> 8,
- .features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA,
+ .features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA |
+ BLK_FEAT_ROTATIONAL,
};
device = minor_to_device(minor);
@@ -4516,7 +4516,8 @@ static bool floppy_available(int drive)
static int floppy_alloc_disk(unsigned int drive, unsigned int type)
{
struct queue_limits lim = {
- .max_hw_sectors = 64,
+ .max_hw_sectors = 64,
+ .features = BLK_FEAT_ROTATIONAL,
};
struct gendisk *disk;
@@ -985,13 +985,11 @@ static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize)
lim.logical_block_size = bsize;
lim.physical_block_size = bsize;
lim.io_min = bsize;
- lim.features &= ~BLK_FEAT_WRITE_CACHE;
+ lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL);
if (file->f_op->fsync && !(lo->lo_flags & LO_FLAGS_READ_ONLY))
lim.features |= BLK_FEAT_WRITE_CACHE;
- if (!backing_bdev || bdev_nonrot(backing_bdev))
- blk_queue_flag_set(QUEUE_FLAG_NONROT, lo->lo_queue);
- else
- blk_queue_flag_clear(QUEUE_FLAG_NONROT, lo->lo_queue);
+ if (backing_bdev && !bdev_nonrot(backing_bdev))
+ lim.features |= BLK_FEAT_ROTATIONAL;
loop_config_discard(lo, &lim);
return queue_limits_commit_update(lo->lo_queue, &lim);
}
@@ -3485,7 +3485,6 @@ static int mtip_block_initialize(struct driver_data *dd)
goto start_service_thread;
/* Set device limits. */
- blk_queue_flag_set(QUEUE_FLAG_NONROT, dd->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, dd->queue);
dma_set_max_seg_size(&dd->pdev->dev, 0x400000);
@@ -150,8 +150,6 @@ static int __init n64cart_probe(struct platform_device *pdev)
set_capacity(disk, size >> SECTOR_SHIFT);
set_disk_ro(disk, 1);
- blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
-
err = add_disk(disk);
if (err)
goto out_cleanup_disk;
@@ -1867,11 +1867,6 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
goto out_err_disk;
}
- /*
- * Tell the block layer that we are not a rotational device
- */
- blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
-
mutex_init(&nbd->config_lock);
refcount_set(&nbd->config_refs, 0);
/*
@@ -1948,7 +1948,6 @@ static int null_add_dev(struct nullb_device *dev)
}
nullb->q->queuedata = nullb;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
rv = ida_alloc(&nullb_indexes, GFP_KERNEL);
if (rv < 0)
@@ -2622,6 +2622,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
struct queue_limits lim = {
.max_hw_sectors = PACKET_MAX_SECTORS,
.logical_block_size = CD_FRAMESIZE,
+ .features = BLK_FEAT_ROTATIONAL,
};
int idx;
int ret = -ENOMEM;
@@ -388,7 +388,8 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
.max_segments = -1,
.max_segment_size = dev->bounce_size,
.dma_alignment = dev->blk_size - 1,
- .features = BLK_FEAT_WRITE_CACHE,
+ .features = BLK_FEAT_WRITE_CACHE |
+ BLK_FEAT_ROTATIONAL,
};
struct gendisk *gendisk;
@@ -4997,9 +4997,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
disk->fops = &rbd_bd_ops;
disk->private_data = rbd_dev;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
-
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
@@ -1352,10 +1352,6 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev,
if (dev->access_mode == RNBD_ACCESS_RO)
set_disk_ro(dev->gd, true);
- /*
- * Network device does not need rotational
- */
- blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
err = add_disk(dev->gd);
if (err)
put_disk(dev->gd);
@@ -791,6 +791,7 @@ static int probe_disk(struct vdc_port *port)
.seg_boundary_mask = PAGE_SIZE - 1,
.max_segment_size = PAGE_SIZE,
.max_segments = port->ring_cookies,
+ .features = BLK_FEAT_ROTATIONAL,
};
struct request_queue *q;
struct gendisk *g;
@@ -787,6 +787,9 @@ static void swim_cleanup_floppy_disk(struct floppy_state *fs)
static int swim_floppy_init(struct swim_priv *swd)
{
+ struct queue_limits lim = {
+ .features = BLK_FEAT_ROTATIONAL,
+ };
int err;
int drive;
struct swim __iomem *base = swd->base;
@@ -820,7 +823,7 @@ static int swim_floppy_init(struct swim_priv *swd)
goto exit_put_disks;
swd->unit[drive].disk =
- blk_mq_alloc_disk(&swd->unit[drive].tag_set, NULL,
+ blk_mq_alloc_disk(&swd->unit[drive].tag_set, &lim,
&swd->unit[drive]);
if (IS_ERR(swd->unit[drive].disk)) {
blk_mq_free_tag_set(&swd->unit[drive].tag_set);
@@ -1189,6 +1189,9 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
static int swim3_attach(struct macio_dev *mdev,
const struct of_device_id *match)
{
+ struct queue_limits lim = {
+ .features = BLK_FEAT_ROTATIONAL,
+ };
struct floppy_state *fs;
struct gendisk *disk;
int rc;
@@ -1210,7 +1213,7 @@ static int swim3_attach(struct macio_dev *mdev,
if (rc)
goto out_unregister;
- disk = blk_mq_alloc_disk(&fs->tag_set, NULL, fs);
+ disk = blk_mq_alloc_disk(&fs->tag_set, &lim, fs);
if (IS_ERR(disk)) {
rc = PTR_ERR(disk);
goto out_free_tag_set;
@@ -484,14 +484,8 @@ static inline unsigned ublk_pos_to_tag(loff_t pos)
static void ublk_dev_param_basic_apply(struct ublk_device *ub)
{
- struct request_queue *q = ub->ub_disk->queue;
const struct ublk_param_basic *p = &ub->params.basic;
- if (p->attrs & UBLK_ATTR_ROTATIONAL)
- blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
- else
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
-
if (p->attrs & UBLK_ATTR_READ_ONLY)
set_disk_ro(ub->ub_disk, true);
@@ -2214,6 +2208,9 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
lim.features |= BLK_FEAT_FUA;
}
+ if (ub->params.basic.attrs & UBLK_ATTR_ROTATIONAL)
+ lim.features |= BLK_FEAT_ROTATIONAL;
+
if (wait_for_completion_interruptible(&ub->completion) != 0)
return -EINTR;
@@ -1451,7 +1451,9 @@ static int virtblk_read_limits(struct virtio_blk *vblk,
static int virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
- struct queue_limits lim = { };
+ struct queue_limits lim = {
+ .features = BLK_FEAT_ROTATIONAL,
+ };
int err, index;
unsigned int queue_depth;
@@ -1133,7 +1133,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
err = PTR_ERR(gd);
goto out_free_tag_set;
}
- blk_queue_flag_set(QUEUE_FLAG_VIRT, gd->queue);
strcpy(gd->disk_name, DEV_NAME);
ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
@@ -2245,8 +2245,6 @@ static int zram_add(void)
/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize */
set_capacity(zram->disk, 0);
- /* zram devices sort of resembles non-rotational disks */
- blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue);
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
@@ -744,6 +744,7 @@ static int probe_gdrom(struct platform_device *devptr)
.max_segments = 1,
/* set a large max size to get most from DMA */
.max_segment_size = 0x40000,
+ .features = BLK_FEAT_ROTATIONAL,
};
int err;
@@ -974,8 +974,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
d->disk->minors = BCACHE_MINORS;
d->disk->fops = ops;
d->disk->private_data = d;
-
- blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
return 0;
out_bioset_exit:
@@ -1716,12 +1716,6 @@ static int device_dax_write_cache_enabled(struct dm_target *ti,
return false;
}
-static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
-{
- return !bdev_nonrot(dev->bdev);
-}
-
static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -1870,12 +1864,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
dax_write_cache(t->md->dax_dev, true);
- /* Ensure that all underlying devices are non-rotational. */
- if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
- blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
- else
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
-
/*
* Some devices don't use blk_integrity but still want stable pages
* because they do their own checksumming.
@@ -6151,20 +6151,7 @@ int md_run(struct mddev *mddev)
if (!mddev_is_dm(mddev)) {
struct request_queue *q = mddev->gendisk->queue;
- bool nonrot = true;
- rdev_for_each(rdev, mddev) {
- if (rdev->raid_disk >= 0 && !bdev_nonrot(rdev->bdev)) {
- nonrot = false;
- break;
- }
- }
- if (mddev->degraded)
- nonrot = false;
- if (nonrot)
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- else
- blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
blk_queue_flag_set(QUEUE_FLAG_IO_STAT, q);
/* Set the NOWAIT flags if all underlying devices support it */
@@ -387,7 +387,6 @@ static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq,
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
blk_queue_rq_timeout(mq->queue, 60 * HZ);
- blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
@@ -375,7 +375,6 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
spin_lock_init(&new->queue_lock);
INIT_LIST_HEAD(&new->rq_list);
- blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);
gd->queue = new->rq;
@@ -1518,7 +1518,6 @@ static int btt_blk_init(struct btt *btt)
btt->btt_disk->fops = &btt_fops;
btt->btt_disk->private_data = btt;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue);
set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
@@ -546,7 +546,6 @@ static int pmem_attach_disk(struct device *dev,
}
pmem->virt_addr = addr;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, q);
if (pmem->pfn_flags & PFN_MAP)
blk_queue_flag_set(QUEUE_FLAG_DAX, q);
@@ -3744,7 +3744,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
if (ctrl->opts && ctrl->opts->data_digest)
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
- blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
if (ctrl->ops->supports_pci_p2pdma &&
ctrl->ops->supports_pci_p2pdma(ctrl))
blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
@@ -549,7 +549,6 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
sprintf(head->disk->disk_name, "nvme%dn%d",
ctrl->subsys->instance, head->instance);
- blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue);
blk_queue_flag_set(QUEUE_FLAG_IO_STAT, head->disk->queue);
/*
@@ -68,7 +68,6 @@ int dasd_gendisk_alloc(struct dasd_block *block)
blk_mq_free_tag_set(&block->tag_set);
return PTR_ERR(gdp);
}
- blk_queue_flag_set(QUEUE_FLAG_NONROT, gdp->queue);
/* Initialize gendisk structure. */
gdp->major = DASD_MAJOR;
@@ -475,7 +475,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
goto out_tag;
}
rq = bdev->rq = bdev->gendisk->queue;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, rq);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq);
bdev->gendisk->private_data = scmdev;
@@ -3314,7 +3314,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp,
rcu_read_unlock();
if (rot == 1) {
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+ lim->features &= ~BLK_FEAT_ROTATIONAL;
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
}
@@ -3642,7 +3642,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
* cause this to be updated correctly and any device which
* doesn't support it should be treated as rotational.
*/
- blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+ lim.features |= BLK_FEAT_ROTATIONAL;
blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
if (scsi_device_supports_vpd(sdp)) {
@@ -289,14 +289,16 @@ enum {
/* supports passing on the FUA bit */
BLK_FEAT_FUA = (1u << 1),
+
+ /* rotational device (hard drive or floppy) */
+ BLK_FEAT_ROTATIONAL = (1u << 2),
};
/*
* Flags automatically inherited when stacking limits.
*/
#define BLK_FEAT_INHERIT_MASK \
- (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA)
-
+ (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL)
/* internal flags in queue_limits.flags */
enum {
@@ -553,8 +555,6 @@ struct request_queue {
#define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */
-#define QUEUE_FLAG_NONROT 6 /* non-rotational device (SSD) */
-#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT 7 /* do disk/partitions IO accounting */
#define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
@@ -589,7 +589,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
-#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
+#define blk_queue_nonrot(q) (!((q)->limits.features & BLK_FEAT_ROTATIONAL))
#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_zone_resetall(q) \
Move the nonrot flag into the queue_limits feature field so that it can
be set atomically and all I/O is frozen when changing the flag.

Use the chance to switch to defaulting to non-rotational and require
the driver to opt into rotational, which matches the polarity of the
sysfs interface.

For the z2ram, ps3vram, 2x memstick, ubiblock and dcssblk the new
rotational flag is not set as they clearly are not rotational despite
this being a behavior change.  There are some other drivers that
unconditionally set the rotational flag to keep the existing behavior
as they arguably can be used on rotational devices even if that is
probably not their main use today (e.g. virtio_blk and drbd).

The flag is automatically inherited in blk_stack_limits matching the
existing behavior in dm and md.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 arch/m68k/emu/nfblock.c             |  1 +
 arch/um/drivers/ubd_kern.c          |  1 -
 arch/xtensa/platforms/iss/simdisk.c |  5 +++-
 block/blk-mq-debugfs.c              |  1 -
 block/blk-sysfs.c                   | 39 ++++++++++++++++++++++++++---
 drivers/block/amiflop.c             |  5 +++-
 drivers/block/aoe/aoeblk.c          |  1 +
 drivers/block/ataflop.c             |  5 +++-
 drivers/block/brd.c                 |  2 --
 drivers/block/drbd/drbd_main.c      |  3 ++-
 drivers/block/floppy.c              |  3 ++-
 drivers/block/loop.c                |  8 +++---
 drivers/block/mtip32xx/mtip32xx.c   |  1 -
 drivers/block/n64cart.c             |  2 --
 drivers/block/nbd.c                 |  5 ----
 drivers/block/null_blk/main.c       |  1 -
 drivers/block/pktcdvd.c             |  1 +
 drivers/block/ps3disk.c             |  3 ++-
 drivers/block/rbd.c                 |  3 ---
 drivers/block/rnbd/rnbd-clt.c       |  4 ---
 drivers/block/sunvdc.c              |  1 +
 drivers/block/swim.c                |  5 +++-
 drivers/block/swim3.c               |  5 +++-
 drivers/block/ublk_drv.c            |  9 +++----
 drivers/block/virtio_blk.c          |  4 ++-
 drivers/block/xen-blkfront.c        |  1 -
 drivers/block/zram/zram_drv.c       |  2 --
 drivers/cdrom/gdrom.c               |  1 +
 drivers/md/bcache/super.c           |  2 --
 drivers/md/dm-table.c               | 12 ---------
 drivers/md/md.c                     | 13 ----------
 drivers/mmc/core/queue.c            |  1 -
 drivers/mtd/mtd_blkdevs.c           |  1 -
 drivers/nvdimm/btt.c                |  1 -
 drivers/nvdimm/pmem.c               |  1 -
 drivers/nvme/host/core.c            |  1 -
 drivers/nvme/host/multipath.c       |  1 -
 drivers/s390/block/dasd_genhd.c     |  1 -
 drivers/s390/block/scm_blk.c        |  1 -
 drivers/scsi/sd.c                   |  4 +--
 include/linux/blkdev.h              | 10 ++++----
 41 files changed, 83 insertions(+), 88 deletions(-)
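
As a reference for driver authors, here is a minimal sketch (illustrative
only, not part of this patch) of how a blk-mq driver for spinning media
would opt into the rotational hint after this change.  The foo_ names and
the block size are made up for the example; only blk_mq_alloc_disk(),
struct queue_limits and BLK_FEAT_ROTATIONAL come from the code above.

/*
 * Illustrative sketch only: a hypothetical "foo" driver for spinning
 * media.  Queues now default to non-rotational, so the driver has to
 * opt in via the features field of the queue_limits it allocates the
 * disk with.
 */
#include <linux/blk-mq.h>
#include <linux/blkdev.h>

static struct gendisk *foo_alloc_disk(struct blk_mq_tag_set *set, void *data)
{
	struct queue_limits lim = {
		.logical_block_size	= 512,	/* made-up value */
		.features		= BLK_FEAT_ROTATIONAL,
	};

	/* limits, including the feature bits, are applied as one unit */
	return blk_mq_alloc_disk(set, &lim, data);
}

A runtime change from userspace follows the same scheme: writing
/sys/block/<dev>/queue/rotational now ends up in queue_feature_store()
above, which goes through queue_limits_start_update() and
queue_limits_commit_update() instead of flipping a queue flag directly.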