@@ -1162,6 +1162,8 @@ void writeback_inodes_sb(struct super_block *sb)
 		.sync_mode	= WB_SYNC_NONE,
 	};
 
+	WARN_ON(!rwsem_is_locked(&sb->s_umount));
+
 	args.nr_pages = nr_dirty + nr_unstable +
 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
@@ -1179,7 +1181,9 @@ EXPORT_SYMBOL(writeback_inodes_sb);
 int writeback_inodes_sb_if_idle(struct super_block *sb)
 {
 	if (!writeback_in_progress(sb->s_bdi)) {
+		down_read(&sb->s_umount);
 		writeback_inodes_sb(sb);
+		up_read(&sb->s_umount);
 		return 1;
 	} else
 		return 0;
@@ -1202,6 +1206,8 @@ void sync_inodes_sb(struct super_block *sb)
 		.range_cyclic	= 0,
 	};
 
+	WARN_ON(!rwsem_is_locked(&sb->s_umount));
+
 	bdi_queue_work_onstack(&args);
 	wait_sb_inodes(sb);
 }
@@ -62,7 +62,9 @@
  */
 static void shrink_liability(struct ubifs_info *c, int nr_to_write)
 {
+	down_read(&c->vfs_sb->s_umount);
 	writeback_inodes_sb(c->vfs_sb);
+	up_read(&c->vfs_sb->s_umount);
 }
 
 /**
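
Illustrative caller-side sketch (not part of the patch): the new WARN_ON checks make explicit that writeback_inodes_sb() and sync_inodes_sb() expect sb->s_umount to be held at least for reading, so any external caller that does not already hold it would wrap the call roughly like this:

	down_read(&sb->s_umount);
	writeback_inodes_sb(sb);	/* kick WB_SYNC_NONE writeback for this sb */
	up_read(&sb->s_umount);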