@@ -305,7 +305,7 @@ cbw_co_snapshot_block_status(BlockDriverState *bs,
         return -EACCES;
     }
 
-    ret = bdrv_block_status(child->bs, offset, cur_bytes, pnum, map, file);
+    ret = bdrv_co_block_status(child->bs, offset, cur_bytes, pnum, map, file);
     if (child == s->target) {
         /*
          * We refer to s->target only for areas that we've written to it.
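
Note: cbw_co_snapshot_block_status() is itself a coroutine_fn, so it can call the coroutine variant directly instead of bouncing through the mixed wrapper, which would re-detect coroutine context on every call. For orientation, a sketch of how such a pair is declared, loosely following include/block/block-io.h (the exact annotations in the tree may differ):

/* Native coroutine version: must be called from coroutine context. */
int coroutine_fn
bdrv_co_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
                     int64_t *pnum, int64_t *map, BlockDriverState **file);

/*
 * Mixed wrapper: the co_wrapper_mixed marker makes
 * scripts/block-coroutine-wrapper.py generate a bdrv_block_status() that
 * calls bdrv_co_block_status() directly when already in a coroutine, or
 * creates a coroutine and polls for its completion otherwise.
 */
int co_wrapper_mixed
bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
                  int64_t *pnum, int64_t *map, BlockDriverState **file);
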
@@ -146,11 +146,11 @@ cor_co_preadv_part(BlockDriverState *bs, int64_t offset, int64_t bytes,
         local_flags = flags;
 
         /* In case of failure, try to copy-on-read anyway */
-        ret = bdrv_is_allocated(bs->file->bs, offset, bytes, &n);
+        ret = bdrv_co_is_allocated(bs->file->bs, offset, bytes, &n);
         if (ret <= 0) {
-            ret = bdrv_is_allocated_above(bdrv_backing_chain_next(bs->file->bs),
-                                          state->bottom_bs, true, offset,
-                                          n, &n);
+            ret = bdrv_co_is_allocated_above(bdrv_backing_chain_next(bs->file->bs),
+                                             state->bottom_bs, true, offset,
+                                             n, &n);
             if (ret > 0 || ret < 0) {
                 local_flags |= BDRV_REQ_COPY_ON_READ;
             }
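
The condition "ret > 0 || ret < 0" reads oddly but is deliberate: the is_allocated family returns > 0 for allocated, 0 for unallocated, and -errno on failure, and the comment above asks for copy-on-read on failure too. An equivalent formulation, shown only for illustration:

/* Equivalent to the check above: act unless the range is known-unallocated. */
if (ret != 0) {
    local_flags |= BDRV_REQ_COPY_ON_READ;
}
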
@@ -1216,8 +1216,8 @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
             ret = 1; /* "already allocated", so nothing will be copied */
             pnum = MIN(align_bytes, max_transfer);
         } else {
-            ret = bdrv_is_allocated(bs, align_offset,
-                                    MIN(align_bytes, max_transfer), &pnum);
+            ret = bdrv_co_is_allocated(bs, align_offset,
+                                       MIN(align_bytes, max_transfer), &pnum);
             if (ret < 0) {
                 /*
                  * Safe to treat errors in querying allocation as if
@@ -1364,7 +1364,7 @@ bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req,
         /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
         flags &= ~BDRV_REQ_COPY_ON_READ;
 
-        ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
+        ret = bdrv_co_is_allocated(bs, offset, bytes, &pnum);
         if (ret < 0) {
             goto out;
         }
@@ -559,9 +559,9 @@ static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
 
         assert(!(offset % s->granularity));
         WITH_GRAPH_RDLOCK_GUARD() {
-            ret = bdrv_block_status_above(source, NULL, offset,
-                                          nb_chunks * s->granularity,
-                                          &io_bytes, NULL, NULL);
+            ret = bdrv_co_block_status_above(source, NULL, offset,
+                                             nb_chunks * s->granularity,
+                                             &io_bytes, NULL, NULL);
         }
         if (ret < 0) {
             io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
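
bdrv_co_block_status_above() walks the backing chain, so the graph read lock must be held across the call; WITH_GRAPH_RDLOCK_GUARD() from include/block/graph-lock.h scopes that in coroutine context. A minimal hypothetical caller, assuming only that header:

/* Hypothetical coroutine_fn, illustrating the guard's scoping. */
static int coroutine_fn
example_query_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
                     int64_t *pnum)
{
    int ret;

    WITH_GRAPH_RDLOCK_GUARD() {
        /* Graph read lock held: safe to follow bs->file / bs->backing. */
        ret = bdrv_co_block_status_above(bs, NULL, offset, bytes, pnum,
                                         NULL, NULL);
    } /* Lock dropped on every exit path from the block. */

    return ret;
}
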
@@ -875,8 +875,8 @@ static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
         }
 
         WITH_GRAPH_RDLOCK_GUARD() {
-            ret = bdrv_is_allocated_above(bs, s->base_overlay, true, offset,
-                                          bytes, &count);
+            ret = bdrv_co_is_allocated_above(bs, s->base_overlay, true, offset,
+                                             bytes, &count);
         }
         if (ret < 0) {
             return ret;
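
For reference, bdrv_co_is_allocated_above(top, base, include_base, ...) asks whether any layer between top and base in the backing chain allocates the range; mirror_dirty_init() uses it to seed the dirty bitmap. A self-contained toy version of the walk (illustration only; the real function also shrinks *pnum per layer):

#include <stdbool.h>
#include <stdint.h>

typedef struct Node {
    struct Node *backing;                         /* NULL at end of chain */
    bool (*covers)(struct Node *n, int64_t offset);
} Node;

/* Walk top..base and report whether any layer allocates @offset. */
static bool is_allocated_above(Node *top, Node *base, bool include_base,
                               int64_t offset)
{
    for (Node *n = top; n; n = n->backing) {
        if (n == base && !include_base) {
            return false;
        }
        if (n->covers(n, offset)) {
            return true;
        }
        if (n == base) {
            return false;
        }
    }
    return false;
}
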
@@ -3986,7 +3986,8 @@ finish:
 }
 
-static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes)
+static bool coroutine_fn GRAPH_RDLOCK
+is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes)
 {
     int64_t nr;
     int res;
@@ -4007,7 +4008,8 @@ static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes)
      * backing file. So, we need a loop.
      */
     do {
-        res = bdrv_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL);
+        res = bdrv_co_block_status_above(bs, NULL, offset, bytes, &nr, NULL,
+                                         NULL);
         offset += nr;
         bytes -= nr;
     } while (res >= 0 && (res & BDRV_BLOCK_ZERO) && nr && bytes);
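
The new is_zero() signature is the other half of the conversion: coroutine_fn restricts callers to coroutine context, and GRAPH_RDLOCK, a clang Thread Safety Analysis annotation from include/block/graph-lock.h, lets the compiler reject callers that do not already hold the graph read lock. A hypothetical caller sketching how the requirement propagates:

/* Hypothetical: carries the same annotations, so the call checks out. */
static bool coroutine_fn GRAPH_RDLOCK
cluster_reads_as_zero(BlockDriverState *bs, int64_t cluster_offset,
                      int64_t cluster_size)
{
    return is_zero(bs, cluster_offset, cluster_size);
}

/*
 * Without coroutine_fn and GRAPH_RDLOCK on the caller, a clang build with
 * -Wthread-safety would flag the call above.
 */
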
@@ -276,10 +276,10 @@ replication_co_writev(BlockDriverState *bs, int64_t sector_num,
     while (remaining_sectors > 0) {
         int64_t count;
 
-        ret = bdrv_is_allocated_above(top->bs, base->bs, false,
-                                      sector_num * BDRV_SECTOR_SIZE,
-                                      remaining_sectors * BDRV_SECTOR_SIZE,
-                                      &count);
+        ret = bdrv_co_is_allocated_above(top->bs, base->bs, false,
+                                         sector_num * BDRV_SECTOR_SIZE,
+                                         remaining_sectors * BDRV_SECTOR_SIZE,
+                                         &count);
         if (ret < 0) {
             goto out1;
         }
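
replication_co_writev() still exposes a sector-based interface while the allocation query takes bytes, hence the multiplications above. A fragment illustrating the arithmetic only (BDRV_SECTOR_SIZE is 512 and BDRV_SECTOR_BITS is 9 in QEMU):

int64_t offset = sector_num * BDRV_SECTOR_SIZE;         /* sectors -> bytes */
int64_t bytes  = remaining_sectors * BDRV_SECTOR_SIZE;
/* ... the query reports a byte count back in `count` ... */
int64_t sectors = count >> BDRV_SECTOR_BITS;            /* bytes -> sectors */
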
@@ -162,7 +162,7 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
         copy = false;
 
         WITH_GRAPH_RDLOCK_GUARD() {
-            ret = bdrv_is_allocated(unfiltered_bs, offset, STREAM_CHUNK, &n);
+            ret = bdrv_co_is_allocated(unfiltered_bs, offset, STREAM_CHUNK, &n);
             if (ret == 1) {
                 /* Allocated in the top, no need to copy. */
             } else if (ret >= 0) {
@@ -170,9 +170,9 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
              * Copy if allocated in the intermediate images. Limit to the
              * known-unallocated area [offset, offset+n*BDRV_SECTOR_SIZE).
              */
-            ret = bdrv_is_allocated_above(bdrv_cow_bs(unfiltered_bs),
-                                          s->base_overlay, true,
-                                          offset, n, &n);
+            ret = bdrv_co_is_allocated_above(bdrv_cow_bs(unfiltered_bs),
+                                             s->base_overlay, true,
+                                             offset, n, &n);
             /* Finish early if end of backing file has been reached */
             if (ret == 0 && n == 0) {
                 n = len - offset;
@@ -1481,8 +1481,8 @@ vvfat_read(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, int nb_sector
         if (s->qcow) {
             int64_t n;
             int ret;
-            ret = bdrv_is_allocated(s->qcow->bs, sector_num * BDRV_SECTOR_SIZE,
-                                    (nb_sectors - i) * BDRV_SECTOR_SIZE, &n);
+            ret = bdrv_co_is_allocated(s->qcow->bs, sector_num * BDRV_SECTOR_SIZE,
+                                       (nb_sectors - i) * BDRV_SECTOR_SIZE, &n);
             if (ret < 0) {
                 return ret;
             }
@@ -1807,10 +1807,10 @@ cluster_was_modified(BDRVVVFATState *s, uint32_t cluster_num)
     }
 
     for (i = 0; !was_modified && i < s->sectors_per_cluster; i++) {
-        was_modified = bdrv_is_allocated(s->qcow->bs,
-                                         (cluster2sector(s, cluster_num) +
-                                          i) * BDRV_SECTOR_SIZE,
-                                         BDRV_SECTOR_SIZE, NULL);
+        was_modified = bdrv_co_is_allocated(s->qcow->bs,
+                                            (cluster2sector(s, cluster_num) +
+                                             i) * BDRV_SECTOR_SIZE,
+                                            BDRV_SECTOR_SIZE, NULL);
     }
 
     /*
@@ -1968,9 +1968,9 @@ get_cluster_count_for_direntry(BDRVVVFATState* s, direntry_t* direntry, const ch
                 for (i = 0; i < s->sectors_per_cluster; i++) {
                     int res;
 
-                    res = bdrv_is_allocated(s->qcow->bs,
-                                            (offset + i) * BDRV_SECTOR_SIZE,
-                                            BDRV_SECTOR_SIZE, NULL);
+                    res = bdrv_co_is_allocated(s->qcow->bs,
+                                               (offset + i) * BDRV_SECTOR_SIZE,
+                                               BDRV_SECTOR_SIZE, NULL);
                     if (res < 0) {
                         return -1;
                     }
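
The vvfat hunks probe one 512-byte sector at a time and pass NULL for pnum: the function tolerates a NULL length pointer when the caller does not care how long the allocated run is (vvfat is the in-tree user of this), so the return value alone answers allocated / unallocated / error. A minimal usage sketch with a hypothetical helper name:

/* Illustrative helper (hypothetical): yes/no/-errno for a single sector. */
static int coroutine_fn
sector_allocated_in_overlay(BlockDriverState *overlay, int64_t sector_num)
{
    return bdrv_co_is_allocated(overlay, sector_num * BDRV_SECTOR_SIZE,
                                BDRV_SECTOR_SIZE, NULL);
}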