@@ -47,6 +47,7 @@ struct slow_work {
#define SLOW_WORK_EXECUTING 1 /* item currently executing */
#define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */
#define SLOW_WORK_VERY_SLOW 3 /* item is very slow */
+#define SLOW_WORK_CANCEL 4 /* item is cancelled, don't enqueue */
const struct slow_work_ops *ops; /* operations table for this item */
struct list_head link; /* link in queue */
};
@@ -96,11 +97,13 @@ static inline void vslow_work_init(struct slow_work *work,
}
extern int slow_work_enqueue(struct slow_work *work);
+extern void cancel_slow_work(struct slow_work *work);
extern int slow_work_register_user(void);
extern void slow_work_unregister_user(void);
extern int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
unsigned long delay);
+extern void cancel_delayed_slow_work(struct delayed_slow_work *dwork);
#ifdef CONFIG_SYSCTL
extern ctl_table slow_work_sysctls[];
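A minimal usage sketch of the new cancellation calls, assuming a hypothetical caller (the names example_object, example_ops, example_execute and friends are illustrative only and not part of this patch); slow_work_register_user() is presumed to have been called already:

	#include <linux/slow-work.h>

	struct example_object {
		struct slow_work work;	/* embedded slow work item */
	};

	static int example_get_ref(struct slow_work *work)
	{
		/* pin the containing object for the duration of the work */
		return 0;
	}

	static void example_put_ref(struct slow_work *work)
	{
		/* drop the reference taken in example_get_ref() */
	}

	static void example_execute(struct slow_work *work)
	{
		/* the slow operation itself */
	}

	static const struct slow_work_ops example_ops = {
		.get_ref	= example_get_ref,
		.put_ref	= example_put_ref,
		.execute	= example_execute,
	};

	static void example_submit(struct example_object *obj)
	{
		slow_work_init(&obj->work, &example_ops);
		if (slow_work_enqueue(&obj->work) < 0)
			return;	/* -EAGAIN, or -EINVAL while being cancelled */
	}

	static void example_teardown(struct example_object *obj)
	{
		/*
		 * Stop the item from running if it hasn't started yet; on
		 * return it is no longer pending, though it may already have
		 * begun executing before the cancel took effect.
		 */
		cancel_slow_work(&obj->work);
	}

For a delayed item, cancel_delayed_slow_work() deletes the timer first and then performs the same cancellation on the embedded work item.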
@@ -194,7 +194,17 @@ static bool slow_work_execute(void)
if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
BUG();
- work->ops->execute(work);
+ /*
+ * Wake up anyone waiting for this item to stop being pending
+ */
+ smp_mb__after_clear_bit();
+ wake_up_bit(&work->flags, SLOW_WORK_PENDING);
+
+ /*
+ * Don't execute if the work was cancelled after being added
+ */
+ if (!test_bit(SLOW_WORK_CANCEL, &work->flags))
+ work->ops->execute(work);
if (very_slow)
atomic_dec(&vslow_work_executing_count);
@@ -260,12 +270,16 @@ auto_requeue:
* allowed to pick items to execute. This ensures that very slow items won't
* overly block ones that are just ordinarily slow.
*
- * Returns 0 if successful, -EAGAIN if not.
+ * Returns 0 if successful, -EAGAIN if not, or -EINVAL if an attempt is made
+ * to enqueue a work item that is currently being cancelled.
*/
int slow_work_enqueue(struct slow_work *work)
{
unsigned long flags;
+ if (test_bit(SLOW_WORK_CANCEL, &work->flags))
+ return -EINVAL;
+
BUG_ON(slow_work_user_count <= 0);
BUG_ON(!work);
BUG_ON(!work->ops);
@@ -347,6 +361,9 @@ int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
struct slow_work *work = &dwork->work;
unsigned long flags;
+ if (test_bit(SLOW_WORK_CANCEL, &work->flags))
+ return -EINVAL;
+
BUG_ON(slow_work_user_count <= 0);
BUG_ON(!work);
BUG_ON(!work->ops);
@@ -377,6 +394,28 @@ cant_get_ref:
}
EXPORT_SYMBOL(delayed_slow_work_enqueue);
+static int slow_work_wait(void *word)
+{
+ schedule();
+ return 0;
+}
+
+void cancel_slow_work(struct slow_work *work)
+{
+ set_bit(SLOW_WORK_CANCEL, &work->flags);
+ wait_on_bit(&work->flags, SLOW_WORK_PENDING, slow_work_wait,
+ TASK_UNINTERRUPTIBLE);
+ clear_bit(SLOW_WORK_CANCEL, &work->flags);
+}
+EXPORT_SYMBOL(cancel_slow_work);
+
+void cancel_delayed_slow_work(struct delayed_slow_work *dwork)
+{
+ del_timer(&dwork->timer);
+ cancel_slow_work(&dwork->work);
+}
+EXPORT_SYMBOL(cancel_delayed_slow_work);
+
/*
* Schedule a cull of the thread pool at some time in the near future
*/