--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -97,7 +97,7 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
init_waitqueue_head(&group->notification_waitq);
group->max_events = UINT_MAX;
- spin_lock_init(&group->mark_lock);
+ mutex_init(&group->mark_mutex);
INIT_LIST_HEAD(&group->marks_list);
group->ops = ops;
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -63,8 +63,8 @@ void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
{
struct inode *inode = mark->i.inode;
+ BUG_ON(!mutex_is_locked(&mark->group->mark_mutex));
assert_spin_locked(&mark->lock);
- assert_spin_locked(&mark->group->mark_lock);
spin_lock(&inode->i_lock);
@@ -191,8 +191,8 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
mark->flags |= FSNOTIFY_MARK_FLAG_INODE;
+ BUG_ON(!mutex_is_locked(&group->mark_mutex));
assert_spin_locked(&mark->lock);
- assert_spin_locked(&group->mark_lock);
spin_lock(&inode->i_lock);
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -137,13 +137,13 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
group = mark->group;
spin_unlock(&mark->lock);
- spin_lock(&group->mark_lock);
+ mutex_lock(&group->mark_mutex);
spin_lock(&mark->lock);
/* something else already called this function on this mark */
if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
spin_unlock(&mark->lock);
- spin_unlock(&group->mark_lock);
+ mutex_unlock(&group->mark_mutex);
goto put_group;
}
@@ -160,7 +160,7 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
list_del_init(&mark->g_list);
spin_unlock(&mark->lock);
- spin_unlock(&group->mark_lock);
+ mutex_unlock(&group->mark_mutex);
spin_lock(&destroy_lock);
list_add(&mark->destroy_list, &destroy_list);
@@ -234,11 +234,11 @@ int fsnotify_add_mark(struct fsnotify_mark *mark,
/*
* LOCKING ORDER!!!!
- * group->mark_lock
+ * group->mark_mutex
* mark->lock
* inode->i_lock
*/
- spin_lock(&group->mark_lock);
+ mutex_lock(&group->mark_mutex);
spin_lock(&mark->lock);
mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE;
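For orientation, here is a minimal sketch of the nesting described by the LOCKING ORDER comment above (a hypothetical caller for illustration only, not part of the patch): the group's mark_mutex is taken outermost, then the per-mark spinlock, then the inode lock, and they are released in reverse order.

/* Illustration of the documented lock ordering; hypothetical, not in the patch. */
static void example_lock_order(struct fsnotify_group *group,
			       struct fsnotify_mark *mark,
			       struct inode *inode)
{
	mutex_lock(&group->mark_mutex);	/* 1. outermost: protects group->marks_list */
	spin_lock(&mark->lock);		/* 2. per-mark state */
	spin_lock(&inode->i_lock);	/* 3. innermost: the inode's mark list */

	/* ... attach or detach the mark here ... */

	spin_unlock(&inode->i_lock);
	spin_unlock(&mark->lock);
	mutex_unlock(&group->mark_mutex);
}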
@@ -265,7 +265,7 @@ int fsnotify_add_mark(struct fsnotify_mark *mark,
fsnotify_set_mark_mask_locked(mark, mark->mask);
spin_unlock(&mark->lock);
- spin_unlock(&group->mark_lock);
+ mutex_unlock(&group->mark_mutex);
if (inode)
__fsnotify_update_child_dentry_flags(inode);
@@ -279,7 +279,7 @@ err:
atomic_dec(&group->num_marks);
spin_unlock(&mark->lock);
- spin_unlock(&group->mark_lock);
+ mutex_unlock(&group->mark_mutex);
spin_lock(&destroy_lock);
list_add(&mark->destroy_list, &destroy_list);
@@ -299,7 +299,7 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
struct fsnotify_mark *lmark, *mark;
LIST_HEAD(free_list);
- spin_lock(&group->mark_lock);
+ mutex_lock(&group->mark_mutex);
list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
if (mark->flags & flags) {
list_add(&mark->free_g_list, &free_list);
@@ -307,7 +307,7 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
fsnotify_get_mark(mark);
}
}
- spin_unlock(&group->mark_lock);
+ mutex_unlock(&group->mark_mutex);
list_for_each_entry_safe(mark, lmark, &free_list, free_g_list) {
fsnotify_destroy_mark(mark);
--- a/fs/notify/vfsmount_mark.c
+++ b/fs/notify/vfsmount_mark.c
@@ -85,8 +85,8 @@ void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark)
{
struct vfsmount *mnt = mark->m.mnt;
+ BUG_ON(!mutex_is_locked(&mark->group->mark_mutex));
assert_spin_locked(&mark->lock);
- assert_spin_locked(&mark->group->mark_lock);
spin_lock(&mnt->mnt_root->d_lock);
@@ -146,8 +146,8 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT;
+ BUG_ON(!mutex_is_locked(&group->mark_mutex));
assert_spin_locked(&mark->lock);
- assert_spin_locked(&group->mark_lock);
spin_lock(&mnt->mnt_root->d_lock);
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -141,7 +141,7 @@ struct fsnotify_group {
unsigned int priority;
/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
- spinlock_t mark_lock; /* protect marks_list */
+ struct mutex mark_mutex; /* protect marks_list */
atomic_t num_marks; /* 1 for each mark and 1 for not being
* past the point of no return when freeing
* a group */
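To illustrate the invariant stated by the new field comment (a hypothetical helper, assuming only the fields visible in this patch): any walk of group->marks_list must now be done with mark_mutex held, and because a mutex is a sleeping lock the walker may block while holding it, which the old spinlock did not allow.

/* Hypothetical helper: iterating a group's mark list under the new mutex. */
static void example_walk_marks(struct fsnotify_group *group)
{
	struct fsnotify_mark *mark;

	mutex_lock(&group->mark_mutex);
	list_for_each_entry(mark, &group->marks_list, g_list) {
		/* entries cannot be added to or removed from marks_list here */
	}
	mutex_unlock(&group->mark_mutex);
}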