@@ -43,7 +43,7 @@ EXPORT_SYMBOL(I_BDEV);
static void bdev_write_inode(struct block_device *bdev)
{
- struct inode *inode = bdev->bd_inode;
+ struct inode *inode = bdev_inode(bdev);
int ret;
spin_lock(&inode->i_lock);
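
Every hunk in this patch leans on a bdev_inode() helper that is introduced elsewhere in the series and is not shown here. A plausible shape for it, assuming it sits next to the existing struct bdev_inode in block/bdev.c (quoted below for context), is simply a container_of() walk from the embedded block_device to its backing inode:

	/* Existing layout in block/bdev.c: the bdev is embedded in front
	 * of its backing inode. */
	struct bdev_inode {
		struct block_device bdev;
		struct inode vfs_inode;
	};

	/* Assumed helper; the exact definition is not part of this section. */
	static struct inode *bdev_inode(struct block_device *bdev)
	{
		return &container_of(bdev, struct bdev_inode, bdev)->vfs_inode;
	}

This is what makes the bd_inode back-pointer removable at all: the inode is recoverable from the block_device by pointer arithmetic alone, so nothing needs to be stored.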
@@ -62,7 +62,7 @@ static void bdev_write_inode(struct block_device *bdev)
/* Kill _all_ buffers and pagecache, dirty or not. */
static void kill_bdev(struct block_device *bdev)
{
- struct address_space *mapping = bdev->bd_inode->i_mapping;
+ struct address_space *mapping = bdev_inode(bdev)->i_mapping;
if (mapping_empty(mapping))
return;
@@ -74,7 +74,7 @@ static void kill_bdev(struct block_device *bdev)
/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
- struct address_space *mapping = bdev->bd_inode->i_mapping;
+ struct address_space *mapping = bdev_inode(bdev)->i_mapping;
if (mapping->nrpages) {
invalidate_bh_lrus();
@@ -102,7 +102,7 @@ int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
goto invalidate;
}
- truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
+ truncate_inode_pages_range(bdev_inode(bdev)->i_mapping, lstart, lend);
if (!(mode & BLK_OPEN_EXCL))
bd_abort_claiming(bdev, truncate_bdev_range);
return 0;
@@ -112,7 +112,7 @@ int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
* Someone else has the device exclusively open. Try invalidating instead.
* The 'end' argument is inclusive so the rounding is safe.
*/
- return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
+ return invalidate_inode_pages2_range(bdev_inode(bdev)->i_mapping,
lstart >> PAGE_SHIFT,
lend >> PAGE_SHIFT);
}
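
A concrete illustration of why the inclusive 'end' makes the rounding safe, assuming PAGE_SHIFT == 12 (4 KiB pages):

	/*
	 * lstart = 4096, lend = 12287  ->  pages 1..2, exactly the bytes covered
	 * lstart = 4096, lend = 12288  ->  pages 1..3; byte 12288 is the first
	 *                                  byte of page 3, so page 3 must go too
	 *
	 * An inclusive end always rounds down onto the page that still
	 * contains it, so the shift can never skip an affected page.
	 */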
@@ -120,18 +120,21 @@ int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
static void set_init_blocksize(struct block_device *bdev)
{
unsigned int bsize = bdev_logical_block_size(bdev);
- loff_t size = i_size_read(bdev->bd_inode);
+ struct inode *inode = bdev_inode(bdev);
+ loff_t size = i_size_read(inode);
while (bsize < PAGE_SIZE) {
if (size & bsize)
break;
bsize <<= 1;
}
- bdev->bd_inode->i_blkbits = blksize_bits(bsize);
+ inode->i_blkbits = blksize_bits(bsize);
}
int set_blocksize(struct block_device *bdev, int size)
{
+ struct inode *inode;
+
/* Size must be a power of two, and between 512 and PAGE_SIZE */
if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
return -EINVAL;
@@ -141,9 +144,10 @@ int set_blocksize(struct block_device *bdev, int size)
return -EINVAL;
/* Don't change the size if it is same as current */
- if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
+ inode = bdev_inode(bdev);
+ if (inode->i_blkbits != blksize_bits(size)) {
sync_blockdev(bdev);
- bdev->bd_inode->i_blkbits = blksize_bits(size);
+ inode->i_blkbits = blksize_bits(size);
kill_bdev(bdev);
}
return 0;
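
For reference, the set_init_blocksize() loop above settles on the largest power-of-two block size, capped at PAGE_SIZE, that still evenly divides the device size (assuming, as for real devices, that the size is a multiple of the logical block size). A standalone userspace mirror of the same arithmetic, illustration only; the function name is made up:

	#include <stdio.h>

	/* Mirrors the set_init_blocksize() doubling loop. */
	static unsigned int pick_init_blocksize(unsigned int bsize,
						long long size,
						unsigned int page_size)
	{
		while (bsize < page_size) {
			if (size & bsize)	/* this bit set: stop doubling */
				break;
			bsize <<= 1;
		}
		return bsize;
	}

	int main(void)
	{
		/* 512-byte sectors, 7 KiB device: stops at 1 KiB. */
		printf("%u\n", pick_init_blocksize(512, 7168, 4096));
		/* 512-byte sectors, 1 MiB device: grows all the way to 4 KiB. */
		printf("%u\n", pick_init_blocksize(512, 1 << 20, 4096));
		return 0;
	}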
@@ -178,7 +182,7 @@ int sync_blockdev_nowait(struct block_device *bdev)
{
if (!bdev)
return 0;
- return filemap_flush(bdev->bd_inode->i_mapping);
+ return filemap_flush(bdev_inode(bdev)->i_mapping);
}
EXPORT_SYMBOL_GPL(sync_blockdev_nowait);
@@ -190,13 +194,13 @@ int sync_blockdev(struct block_device *bdev)
{
if (!bdev)
return 0;
- return filemap_write_and_wait(bdev->bd_inode->i_mapping);
+ return filemap_write_and_wait(bdev_inode(bdev)->i_mapping);
}
EXPORT_SYMBOL(sync_blockdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
{
- return filemap_write_and_wait_range(bdev->bd_inode->i_mapping,
+ return filemap_write_and_wait_range(bdev_inode(bdev)->i_mapping,
lstart, lend);
}
EXPORT_SYMBOL(sync_blockdev_range);
@@ -395,7 +399,6 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
spin_lock_init(&bdev->bd_size_lock);
mutex_init(&bdev->bd_holder_lock);
bdev->bd_partno = partno;
- bdev->bd_inode = inode;
bdev->bd_queue = disk->queue;
if (partno)
bdev->bd_has_submit_bio = disk->part0->bd_has_submit_bio;
@@ -413,17 +416,19 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
{
spin_lock(&bdev->bd_size_lock);
- i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
+ i_size_write(bdev_inode(bdev), (loff_t)sectors << SECTOR_SHIFT);
bdev->bd_nr_sectors = sectors;
spin_unlock(&bdev->bd_size_lock);
}
void bdev_add(struct block_device *bdev, dev_t dev)
{
+ struct inode *inode = bdev_inode(bdev);
+
bdev->bd_dev = dev;
- bdev->bd_inode->i_rdev = dev;
- bdev->bd_inode->i_ino = dev;
- insert_inode_hash(bdev->bd_inode);
+ inode->i_rdev = dev;
+ inode->i_ino = dev;
+ insert_inode_hash(inode);
}
long nr_blockdev_pages(void)
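
Hashing the inode under the raw dev_t in bdev_add() is what lets a later open find the device again. A hedged sketch of the lookup side, loosely modelled on the existing blkdev_get_no_open() (blockdev_superblock is the pseudo-filesystem superblock backing all bdev inodes):

	/* Illustration only; error handling and refcounting trimmed. */
	static struct block_device *bdev_lookup(dev_t dev)
	{
		struct inode *inode = ilookup(blockdev_superblock, dev);

		if (!inode)
			return NULL;
		/* The inode reference taken by ilookup() pins the bdev. */
		return I_BDEV(inode);
	}

The real blkdev_get_no_open() converts that inode reference into a block_device reference before dropping it with iput().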
@@ -401,7 +401,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
op = REQ_OP_ZONE_RESET;
/* Invalidate the page cache, including dirty pages. */
- filemap_invalidate_lock(bdev->bd_inode->i_mapping);
+ filemap_invalidate_lock(bdev_inode(bdev)->i_mapping);
ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
if (ret)
goto fail;
@@ -424,7 +424,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
fail:
if (cmd == BLKRESETZONE)
- filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
+ filemap_invalidate_unlock(bdev_inode(bdev)->i_mapping);
return ret;
}
@@ -605,7 +605,7 @@ static int blkdev_open(struct inode *inode, struct file *filp)
if (bdev_nowait(handle->bdev))
filp->f_mode |= FMODE_NOWAIT;
- filp->f_mapping = handle->bdev->bd_inode->i_mapping;
+ filp->f_mapping = bdev_inode(handle->bdev)->i_mapping;
filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
filp->private_data = handle;
return 0;
@@ -657,7 +657,7 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct block_device *bdev = I_BDEV(file->f_mapping->host);
- struct inode *bd_inode = bdev->bd_inode;
+ struct inode *bd_inode = bdev_inode(bdev);
loff_t size = bdev_nr_bytes(bdev);
size_t shorted = 0;
ssize_t ret;
@@ -653,7 +653,7 @@ void del_gendisk(struct gendisk *disk)
*/
mutex_lock(&disk->open_mutex);
xa_for_each(&disk->part_tbl, idx, part)
- remove_inode_hash(part->bd_inode);
+ remove_inode_hash(bdev_inode(part));
mutex_unlock(&disk->open_mutex);
/*
@@ -742,7 +742,7 @@ void invalidate_disk(struct gendisk *disk)
struct block_device *bdev = disk->part0;
invalidate_bdev(bdev);
- bdev->bd_inode->i_mapping->wb_err = 0;
+ bdev_inode(bdev)->i_mapping->wb_err = 0;
set_capacity(disk, 0);
}
EXPORT_SYMBOL(invalidate_disk);
@@ -1188,7 +1188,7 @@ static void disk_release(struct device *dev)
if (test_bit(GD_ADDED, &disk->state) && disk->fops->free_disk)
disk->fops->free_disk(disk);
- iput(disk->part0->bd_inode); /* frees the disk */
+ iput(bdev_inode(disk->part0)); /* frees the disk */
}
static int block_uevent(const struct device *dev, struct kobj_uevent_env *env)
@@ -1378,7 +1378,7 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
out_destroy_part_tbl:
xa_destroy(&disk->part_tbl);
disk->part0->bd_disk = NULL;
- iput(disk->part0->bd_inode);
+ iput(bdev_inode(disk->part0));
out_free_bdi:
bdi_put(disk->bdi);
out_free_bioset:
@@ -89,7 +89,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
{
uint64_t range[2];
uint64_t start, len;
- struct inode *inode = bdev->bd_inode;
+ struct inode *inode = bdev_inode(bdev);
int err;
if (!(mode & BLK_OPEN_WRITE))
@@ -143,12 +143,12 @@ static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode,
if (start + len > bdev_nr_bytes(bdev))
return -EINVAL;
- filemap_invalidate_lock(bdev->bd_inode->i_mapping);
+ filemap_invalidate_lock(bdev_inode(bdev)->i_mapping);
err = truncate_bdev_range(bdev, mode, start, start + len - 1);
if (!err)
err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
GFP_KERNEL);
- filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
+ filemap_invalidate_unlock(bdev_inode(bdev)->i_mapping);
return err;
}
@@ -158,7 +158,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
{
uint64_t range[2];
uint64_t start, end, len;
- struct inode *inode = bdev->bd_inode;
+ struct inode *inode = bdev_inode(bdev);
int err;
if (!(mode & BLK_OPEN_WRITE))
@@ -243,7 +243,7 @@ static const struct attribute_group *part_attr_groups[] = {
static void part_release(struct device *dev)
{
put_disk(dev_to_bdev(dev)->bd_disk);
- iput(dev_to_bdev(dev)->bd_inode);
+ iput(bdev_inode(dev_to_bdev(dev)));
}
static int part_uevent(const struct device *dev, struct kobj_uevent_env *env)
@@ -483,7 +483,7 @@ int bdev_del_partition(struct gendisk *disk, int partno)
* Just delete the partition and invalidate it.
*/
- remove_inode_hash(part->bd_inode);
+ remove_inode_hash(bdev_inode(part));
invalidate_bdev(part);
drop_partition(part);
ret = 0;
@@ -669,7 +669,7 @@ int bdev_disk_changed(struct gendisk *disk, bool invalidate)
* it cannot be looked up any more even when openers
* still hold references.
*/
- remove_inode_hash(part->bd_inode);
+ remove_inode_hash(bdev_inode(part));
/*
* If @disk->open_partitions isn't elevated but there's
@@ -718,7 +718,8 @@ EXPORT_SYMBOL_GPL(bdev_disk_changed);
void *read_part_sector(struct parsed_partitions *state, sector_t n, Sector *p)
{
- struct address_space *mapping = state->disk->part0->bd_inode->i_mapping;
+ struct address_space *mapping =
+ bdev_inode(state->disk->part0)->i_mapping;
struct folio *folio;
if (n >= get_capacity(state->disk)) {
@@ -50,8 +50,7 @@ struct block_device {
bool bd_has_submit_bio;
dev_t bd_dev;
atomic_t bd_openers;
- spinlock_t bd_size_lock; /* for bd_inode->i_size updates */
- struct inode * bd_inode; /* will die */
+ spinlock_t bd_size_lock; /* for i_size updates */
void * bd_claiming;
void * bd_holder;
const struct blk_holder_ops *bd_holder_ops;
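
With the back-pointer gone, the inode-to-bdev direction is unchanged; for completeness, the existing converters in block/bdev.c (quoted from memory, minor details may differ) are the exact inverse of the bdev_inode() helper sketched earlier:

	static struct bdev_inode *BDEV_I(struct inode *inode)
	{
		return container_of(inode, struct bdev_inode, vfs_inode);
	}

	struct block_device *I_BDEV(struct inode *inode)
	{
		return &BDEV_I(inode)->bdev;
	}

Dropping 'struct inode *bd_inode' also shrinks struct block_device by one pointer; the bd_size_lock comment is reworded since there is no bd_inode field left to name.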
@@ -211,7 +211,7 @@ struct gendisk {
static inline bool disk_live(struct gendisk *disk)
{
- return !inode_unhashed(disk->part0->bd_inode);
+ return !inode_unhashed(bdev_inode(disk->part0));
}
/**
@@ -1339,7 +1339,7 @@ static inline unsigned int blksize_bits(unsigned int size)
static inline unsigned int block_size(struct block_device *bdev)
{
- return 1 << bdev->bd_inode->i_blkbits;
+ return 1 << bdev_inode(bdev)->i_blkbits;
}
int kblockd_schedule_work(struct work_struct *work);
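
block_size() simply undoes blksize_bits(), so after a successful set_blocksize(bdev, n) it reads back n. A quick userspace round-trip check, using the classic shift-loop form of blksize_bits() (current kernels compute it differently, but the result is identical for the power-of-two sizes set_blocksize() accepts):

	#include <assert.h>

	/* Userspace mirror of the historical blksize_bits(), illustration only. */
	static unsigned int demo_blksize_bits(unsigned int size)
	{
		unsigned int bits = 8;

		do {
			bits++;
			size >>= 1;
		} while (size > 256);
		return bits;
	}

	int main(void)
	{
		/* 1 << blksize_bits(n) == n for n = 512..4096 */
		for (unsigned int n = 512; n <= 4096; n <<= 1)
			assert(1u << demo_blksize_bits(n) == n);
		return 0;
	}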