@@ -303,6 +303,39 @@ void job_txn_unref(JobTxn *txn);
*/
void job_txn_add_job(JobTxn *txn, Job *job);
+/** Returns the @ret field of a given Job. */
+int job_get_ret(Job *job);
+
+/** Returns the AioContext of a given Job. */
+AioContext *job_get_aiocontext(Job *job);
+
+/** Sets the AioContext of a given Job. */
+void job_set_aiocontext(Job *job, AioContext *aio);
+
+/** Returns whether a given Job is busy. */
+bool job_is_busy(Job *job);
+
+/** Returns the Error of a given Job. */
+Error *job_get_err(Job *job);
+
+/** Returns whether a Job has a pause_count > 0. */
+bool job_should_pause(Job *job);
+
+/** Sets the user_paused flag of a given Job to true. */
+void job_set_user_paused(Job *job);
+
+/** Sets the cancelled flag of a given Job. */
+void job_set_cancelled(Job *job, bool cancel);
+
+/** Returns whether a given Job is paused. */
+bool job_is_paused(Job *job);
+
+/** Returns whether a given Job is force cancelled. */
+bool job_is_force_cancel(Job *job);
+
+/** Returns the status of a given Job. */
+JobStatus job_get_status(Job *job);
+
/**
* Create a new long-running job and return it.
*
@@ -5721,7 +5721,7 @@ XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp)
GSList *el;
xdbg_graph_add_node(gr, job, X_DBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_JOB,
- job->job.id);
+ job->job.id);
for (el = job->nodes; el; el = el->next) {
xdbg_graph_add_edge(gr, job, (BdrvChild *)el->data);
}
@@ -367,7 +367,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
goto fail;
}
- s->base = blk_new(s->common.job.aio_context,
+ s->base = blk_new(job_get_aiocontext(&s->common.job),
base_perms,
BLK_PERM_CONSISTENT_READ
| BLK_PERM_GRAPH_MOD
@@ -380,7 +380,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
s->base_bs = base;
/* Required permissions are already taken with block_job_add_bdrv() */
- s->top = blk_new(s->common.job.aio_context, 0, BLK_PERM_ALL);
+ s->top = blk_new(job_get_aiocontext(&s->common.job), 0, BLK_PERM_ALL);
ret = blk_insert_bs(s->top, top, errp);
if (ret < 0) {
goto fail;
@@ -636,7 +636,7 @@ static int mirror_exit_common(Job *job)
BlockDriverState *target_bs;
BlockDriverState *mirror_top_bs;
Error *local_err = NULL;
- bool abort = job->ret < 0;
+ bool abort = job_get_ret(job) < 0;
int ret = 0;
if (s->prepared) {
@@ -930,7 +930,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
while (!job_is_cancelled(&s->common.job) && !s->should_complete) {
job_yield(&s->common.job);
}
- s->common.job.cancelled = false;
+ job_set_cancelled(&s->common.job, false);
goto immediate_exit;
}
@@ -1065,7 +1065,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
* completion.
*/
assert(QLIST_EMPTY(&bs->tracked_requests));
- s->common.job.cancelled = false;
+ job_set_cancelled(&s->common.job, false);
need_drain = false;
break;
}
@@ -1079,7 +1079,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
job_sleep_ns(&s->common.job, delay_ns);
if (job_is_cancelled(&s->common.job) &&
- (!s->synced || s->common.job.force_cancel))
+ (!s->synced || job_is_force_cancel(&s->common.job)))
{
break;
}
@@ -1092,8 +1092,8 @@ immediate_exit:
* or it was cancelled prematurely so that we do not guarantee that
* the target is a copy of the source.
*/
- assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) &&
- job_is_cancelled(&s->common.job)));
+ assert(ret < 0 || ((job_is_force_cancel(&s->common.job) || !s->synced)
+ && job_is_cancelled(&s->common.job)));
assert(need_drain);
mirror_wait_for_all_io(s);
}
@@ -1150,7 +1150,7 @@ static void mirror_complete(Job *job, Error **errp)
s->should_complete = true;
/* If the job is paused, it will be re-entered when it is resumed */
- if (!job->paused) {
+ if (!job_is_paused(job)) {
job_enter(job);
}
}
@@ -1171,7 +1171,8 @@ static bool mirror_drained_poll(BlockJob *job)
* from one of our own drain sections, to avoid a deadlock waiting for
* ourselves.
*/
- if (!s->common.job.paused && !s->common.job.cancelled && !s->in_drain) {
+ if (!job_is_paused(&s->common.job) && !job_is_cancelled(&s->common.job) &&
+ !s->in_drain) {
return true;
}
@@ -149,7 +149,8 @@ static void replication_close(BlockDriverState *bs)
}
if (s->stage == BLOCK_REPLICATION_FAILOVER) {
commit_job = &s->commit_job->job;
- assert(commit_job->aio_context == qemu_get_current_aio_context());
+ assert(job_get_aiocontext(commit_job) ==
+ qemu_get_current_aio_context());
job_cancel_sync(commit_job);
}
@@ -147,7 +147,7 @@ void blockdev_mark_auto_del(BlockBackend *blk)
for (job = block_job_next(NULL); job; job = block_job_next(job)) {
if (block_job_has_bdrv(job, blk_bs(blk))) {
- AioContext *aio_context = job->job.aio_context;
+ AioContext *aio_context = job_get_aiocontext(&job->job);
aio_context_acquire(aio_context);
job_cancel(&job->job, false);
@@ -112,7 +112,7 @@ static bool child_job_drained_poll(BdrvChild *c)
/* An inactive or completed job doesn't have any pending requests. Jobs
* with !job->busy are either already paused or have a pause point after
* being reentered, so no job driver code will run before they pause. */
- if (!job->busy || job_is_completed(job)) {
+ if (!job_is_busy(job) || job_is_completed(job)) {
return false;
}
@@ -161,14 +161,14 @@ static void child_job_set_aio_ctx(BdrvChild *c, AioContext *ctx,
bdrv_set_aio_context_ignore(sibling->bs, ctx, ignore);
}
- job->job.aio_context = ctx;
+ job_set_aiocontext(&job->job, ctx);
}
static AioContext *child_job_get_parent_aio_context(BdrvChild *c)
{
BlockJob *job = c->opaque;
- return job->job.aio_context;
+ return job_get_aiocontext(&job->job);
}
static const BdrvChildClass child_job = {
@@ -222,18 +222,19 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
{
BdrvChild *c;
bool need_context_ops;
+ AioContext *ctx = job_get_aiocontext(&job->job);
bdrv_ref(bs);
- need_context_ops = bdrv_get_aio_context(bs) != job->job.aio_context;
+ need_context_ops = bdrv_get_aio_context(bs) != ctx;
- if (need_context_ops && job->job.aio_context != qemu_get_aio_context()) {
- aio_context_release(job->job.aio_context);
+ if (need_context_ops && ctx != qemu_get_aio_context()) {
+ aio_context_release(ctx);
}
c = bdrv_root_attach_child(bs, name, &child_job, 0, perm, shared_perm, job,
errp);
- if (need_context_ops && job->job.aio_context != qemu_get_aio_context()) {
- aio_context_acquire(job->job.aio_context);
+ if (need_context_ops && ctx != qemu_get_aio_context()) {
+ aio_context_acquire(ctx);
}
if (c == NULL) {
return -EPERM;
@@ -303,37 +304,41 @@ int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n)
return ratelimit_calculate_delay(&job->limit, n);
}
-BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
+BlockJobInfo *block_job_query(BlockJob *blkjob, Error **errp)
{
BlockJobInfo *info;
uint64_t progress_current, progress_total;
+ int job_ret;
+ Job *job = &blkjob->job;
- if (block_job_is_internal(job)) {
+ if (block_job_is_internal(blkjob)) {
error_setg(errp, "Cannot query QEMU internal jobs");
return NULL;
}
- progress_get_snapshot(&job->job.progress, &progress_current,
+ progress_get_snapshot(&job->progress, &progress_current,
&progress_total);
info = g_new0(BlockJobInfo, 1);
- info->type = g_strdup(job_type_str(&job->job));
- info->device = g_strdup(job->job.id);
- info->busy = qatomic_read(&job->job.busy);
- info->paused = job->job.pause_count > 0;
+ info->type = g_strdup(job_type_str(job));
+ info->device = g_strdup(job->id);
+ info->busy = job_is_busy(job);
+ info->paused = job_should_pause(job);
info->offset = progress_current;
info->len = progress_total;
- info->speed = job->speed;
- info->io_status = job->iostatus;
- info->ready = job_is_ready(&job->job),
- info->status = job->job.status;
- info->auto_finalize = job->job.auto_finalize;
- info->auto_dismiss = job->job.auto_dismiss;
- if (job->job.ret) {
+ info->speed = blkjob->speed;
+ info->io_status = blkjob->iostatus;
+ info->ready = job_is_ready(job);
+ info->status = job_get_status(job);
+ info->auto_finalize = job->auto_finalize;
+ info->auto_dismiss = job->auto_dismiss;
+ job_ret = job_get_ret(job);
+ if (job_ret) {
+ Error *job_err = job_get_err(job);
info->has_error = true;
- info->error = job->job.err ?
- g_strdup(error_get_pretty(job->job.err)) :
- g_strdup(strerror(-job->job.ret));
+ info->error = job_err ?
+ g_strdup(error_get_pretty(job_err)) :
+ g_strdup(strerror(-job_ret));
}
return info;
}
@@ -367,26 +372,27 @@ static void block_job_event_cancelled(Notifier *n, void *opaque)
static void block_job_event_completed(Notifier *n, void *opaque)
{
- BlockJob *job = opaque;
+ BlockJob *blkjob = opaque;
const char *msg = NULL;
uint64_t progress_current, progress_total;
+ Job *job = &blkjob->job;
- if (block_job_is_internal(job)) {
+ if (block_job_is_internal(blkjob)) {
return;
}
- if (job->job.ret < 0) {
- msg = error_get_pretty(job->job.err);
+ if (job_get_ret(job) < 0) {
+ msg = error_get_pretty(job_get_err(job));
}
- progress_get_snapshot(&job->job.progress, &progress_current,
+ progress_get_snapshot(&job->progress, &progress_current,
&progress_total);
- qapi_event_send_block_job_completed(job_type(&job->job),
- job->job.id,
+ qapi_event_send_block_job_completed(job_type(job),
+ job->id,
progress_total,
progress_current,
- job->speed,
+ blkjob->speed,
!!msg,
msg);
}
@@ -498,7 +504,7 @@ void block_job_iostatus_reset(BlockJob *job)
if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
return;
}
- assert(job->job.user_paused && job->job.pause_count > 0);
+ assert(job_user_paused(&job->job) && job_should_pause(&job->job));
job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}
@@ -538,10 +544,10 @@ BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
action);
}
if (action == BLOCK_ERROR_ACTION_STOP) {
- if (!job->job.user_paused) {
+ if (!job_user_paused(&job->job)) {
job_pause(&job->job);
/* make the pause user visible, which will be resumed from QMP. */
- job->job.user_paused = true;
+ job_set_user_paused(&job->job);
}
block_job_iostatus_set_err(job, error);
}
@@ -42,7 +42,7 @@ static Job *find_job(const char *id, AioContext **aio_context, Error **errp)
return NULL;
}
- *aio_context = job->aio_context;
+ *aio_context = job_get_aiocontext(job);
aio_context_acquire(*aio_context);
return job;
@@ -122,7 +122,7 @@ void qmp_job_finalize(const char *id, Error **errp)
* automatically acquires the new one), so make sure we release the correct
* one.
*/
- aio_context = job->aio_context;
+ aio_context = job_get_aiocontext(job);
job_unref(job);
aio_context_release(aio_context);
}
@@ -146,21 +146,23 @@ static JobInfo *job_query_single(Job *job, Error **errp)
JobInfo *info;
uint64_t progress_current;
uint64_t progress_total;
+ Error *job_err;
assert(!job_is_internal(job));
progress_get_snapshot(&job->progress, &progress_current,
&progress_total);
+ job_err = job_get_err(job);
info = g_new(JobInfo, 1);
*info = (JobInfo) {
.id = g_strdup(job->id),
.type = job_type(job),
- .status = job->status,
+ .status = job_get_status(job),
.current_progress = progress_current,
.total_progress = progress_total,
- .has_error = !!job->err,
- .error = job->err ? \
- g_strdup(error_get_pretty(job->err)) : NULL,
+ .has_error = !!job_err,
+ .error = job_err ? \
+ g_strdup(error_get_pretty(job_err)) : NULL,
};
return info;
@@ -178,7 +180,7 @@ JobInfoList *qmp_query_jobs(Error **errp)
if (job_is_internal(job)) {
continue;
}
- aio_context = job->aio_context;
+ aio_context = job_get_aiocontext(job);
aio_context_acquire(aio_context);
value = job_query_single(job, errp);
aio_context_release(aio_context);
@@ -94,6 +94,46 @@ static void __attribute__((__constructor__)) job_init(void)
qemu_mutex_init(&job_mutex);
}
+AioContext *job_get_aiocontext(Job *job)
+{
+ return job->aio_context;
+}
+
+void job_set_aiocontext(Job *job, AioContext *aio)
+{
+ job->aio_context = aio;
+}
+
+bool job_is_busy(Job *job)
+{
+ return qatomic_read(&job->busy);
+}
+
+int job_get_ret(Job *job)
+{
+ return job->ret;
+}
+
+Error *job_get_err(Job *job)
+{
+ return job->err;
+}
+
+JobStatus job_get_status(Job *job)
+{
+ return job->status;
+}
+
+void job_set_cancelled(Job *job, bool cancel)
+{
+ job->cancelled = cancel;
+}
+
+bool job_is_force_cancel(Job *job)
+{
+ return job->force_cancel;
+}
+
JobTxn *job_txn_new(void)
{
JobTxn *txn = g_new0(JobTxn, 1);
@@ -269,11 +309,16 @@ static bool job_started(Job *job)
return job->co;
}
-static bool job_should_pause(Job *job)
+bool job_should_pause(Job *job)
{
return job->pause_count > 0;
}
+bool job_is_paused(Job *job)
+{
+ return job->paused;
+}
+
Job *job_next(Job *job)
{
if (!job) {
@@ -591,6 +636,11 @@ bool job_user_paused(Job *job)
return job->user_paused;
}
+void job_set_user_paused(Job *job)
+{
+ job->user_paused = true;
+}
+
void job_user_resume(Job *job, Error **errp)
{
assert(job);
@@ -921,7 +921,7 @@ static void run_block_job(BlockJob *job, Error **errp)
if (!job_is_completed(&job->job)) {
ret = job_complete_sync(&job->job, errp);
} else {
- ret = job->job.ret;
+ ret = job_get_ret(&job->job);
}
job_unref(&job->job);
aio_context_release(aio_context);
Using getters/setters we can have a more strict control on struct Job fields. The struct remains public, because it is also used as base class for BlockJobs and various, but replace all direct accesses to the fields we want to protect with getters/setters. This is in preparation to the locking patches. No functional change intended. Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> --- include/qemu/job.h | 33 +++++++++++++++++++ block.c | 2 +- block/commit.c | 4 +-- block/mirror.c | 17 +++++----- block/replication.c | 3 +- blockdev.c | 2 +- blockjob.c | 78 ++++++++++++++++++++++++--------------------- job-qmp.c | 16 ++++++---- job.c | 52 +++++++++++++++++++++++++++++- qemu-img.c | 2 +- 10 files changed, 151 insertions(+), 58 deletions(-)