diff --git a/block/qcow2.h b/block/qcow2.h
--- a/block/qcow2.h
+++ b/block/qcow2.h
@@ -510,6 +510,18 @@ typedef enum QCow2MetadataOverlap {
#define INV_OFFSET (-1ULL)
+static inline uint64_t get_l2_entry(BDRVQcow2State *s, uint64_t *l2_slice,
+ int idx)
+{
+ return be64_to_cpu(l2_slice[idx]);
+}
+
+static inline void set_l2_entry(BDRVQcow2State *s, uint64_t *l2_slice,
+ int idx, uint64_t entry)
+{
+ l2_slice[idx] = cpu_to_be64(entry);
+}
+
static inline bool has_data_file(BlockDriverState *bs)
{
BDRVQcow2State *s = bs->opaque;
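
Note that at this point the new accessors are thin wrappers: get_l2_entry(s, l2_slice, idx) is exactly be64_to_cpu(l2_slice[idx]), and set_l2_entry() is the matching cpu_to_be64() store, so every conversion in the hunks below is mechanical. The BDRVQcow2State argument is not used by either helper yet; keeping it in the signature presumably leaves room for entry layouts that depend on per-image state, but nothing in this patch relies on that.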
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -383,12 +383,13 @@ fail:
* cluster which may require a different handling)
*/
static int count_contiguous_clusters(BlockDriverState *bs, int nb_clusters,
- int cluster_size, uint64_t *l2_slice, uint64_t stop_flags)
+ int cluster_size, uint64_t *l2_slice, int l2_index, uint64_t stop_flags)
{
+ BDRVQcow2State *s = bs->opaque;
int i;
QCow2ClusterType first_cluster_type;
uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
- uint64_t first_entry = be64_to_cpu(l2_slice[0]);
+ uint64_t first_entry = get_l2_entry(s, l2_slice, l2_index);
uint64_t offset = first_entry & mask;
first_cluster_type = qcow2_get_cluster_type(bs, first_entry);
@@ -401,7 +402,7 @@ static int count_contiguous_clusters(BlockDriverState *bs, int nb_clusters,
first_cluster_type == QCOW2_CLUSTER_ZERO_ALLOC);
for (i = 0; i < nb_clusters; i++) {
- uint64_t l2_entry = be64_to_cpu(l2_slice[i]) & mask;
+ uint64_t l2_entry = get_l2_entry(s, l2_slice, l2_index + i) & mask;
if (offset + (uint64_t) i * cluster_size != l2_entry) {
break;
}
@@ -417,14 +418,16 @@ static int count_contiguous_clusters(BlockDriverState *bs, int nb_clusters,
static int count_contiguous_clusters_unallocated(BlockDriverState *bs,
int nb_clusters,
uint64_t *l2_slice,
+ int l2_index,
QCow2ClusterType wanted_type)
{
+ BDRVQcow2State *s = bs->opaque;
int i;
assert(wanted_type == QCOW2_CLUSTER_ZERO_PLAIN ||
wanted_type == QCOW2_CLUSTER_UNALLOCATED);
for (i = 0; i < nb_clusters; i++) {
- uint64_t entry = be64_to_cpu(l2_slice[i]);
+ uint64_t entry = get_l2_entry(s, l2_slice, l2_index + i);
QCow2ClusterType type = qcow2_get_cluster_type(bs, entry);
if (type != wanted_type) {
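
Both counting helpers used to receive a pointer already offset into the slice (&l2_slice[l2_index]) and index it from 0. They now take the slice base plus an explicit l2_index, because get_l2_entry() addresses entries by index rather than by pointer, and each gains a BDRVQcow2State *s local only to feed the accessor. The callers in qcow2_get_host_offset() are adjusted to match below.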
@@ -575,7 +578,7 @@ int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset,
/* find the cluster offset for the given disk offset */
l2_index = offset_to_l2_slice_index(s, offset);
- l2_entry = be64_to_cpu(l2_slice[l2_index]);
+ l2_entry = get_l2_entry(s, l2_slice, l2_index);
nb_clusters = size_to_clusters(s, bytes_needed);
/* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
@@ -610,7 +613,7 @@ int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset,
case QCOW2_CLUSTER_UNALLOCATED:
/* how many empty clusters ? */
c = count_contiguous_clusters_unallocated(bs, nb_clusters,
- &l2_slice[l2_index], type);
+ l2_slice, l2_index, type);
break;
case QCOW2_CLUSTER_ZERO_ALLOC:
case QCOW2_CLUSTER_NORMAL: {
@@ -618,7 +621,7 @@ int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset,
*host_offset = host_cluster_offset + offset_in_cluster;
/* how many allocated clusters ? */
c = count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
- &l2_slice[l2_index], QCOW_OFLAG_ZERO);
+ l2_slice, l2_index, QCOW_OFLAG_ZERO);
if (offset_into_cluster(s, host_cluster_offset)) {
qcow2_signal_corruption(bs, true, -1, -1,
"Cluster allocation offset %#"
@@ -770,7 +773,7 @@ int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
/* Compression can't overwrite anything. Fail if the cluster was already
* allocated. */
- cluster_offset = be64_to_cpu(l2_slice[l2_index]);
+ cluster_offset = get_l2_entry(s, l2_slice, l2_index);
if (cluster_offset & L2E_OFFSET_MASK) {
qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
return -EIO;
@@ -799,7 +802,7 @@ int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
- l2_slice[l2_index] = cpu_to_be64(cluster_offset);
+ set_l2_entry(s, l2_slice, l2_index, cluster_offset);
qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
*host_offset = cluster_offset & s->cluster_offset_mask;
@@ -992,14 +995,14 @@ int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
* cluster the second one has to do RMW (which is done above by
* perform_cow()), update l2 table with its cluster pointer and free
* old cluster. This is what this loop does */
- if (l2_slice[l2_index + i] != 0) {
- old_cluster[j++] = l2_slice[l2_index + i];
+ if (get_l2_entry(s, l2_slice, l2_index + i) != 0) {
+ old_cluster[j++] = get_l2_entry(s, l2_slice, l2_index + i);
}
/* The offset must fit in the offset field of the L2 table entry */
assert((offset & L2E_OFFSET_MASK) == offset);
- l2_slice[l2_index + i] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
+ set_l2_entry(s, l2_slice, l2_index + i, offset | QCOW_OFLAG_COPIED);
}
@@ -1013,8 +1016,7 @@ int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
*/
if (!m->keep_old_clusters && j != 0) {
for (i = 0; i < j; i++) {
- qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
- QCOW2_DISCARD_NEVER);
+ qcow2_free_any_clusters(bs, old_cluster[i], 1, QCOW2_DISCARD_NEVER);
}
}
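
One call site changes representation rather than just syntax: old_cluster[] in qcow2_alloc_cluster_link_l2() used to hold raw big-endian words copied straight out of the L2 slice, with the byte swap deferred to the qcow2_free_any_clusters() call. Since the entries are now read through get_l2_entry(), the array holds host-byte-order values from the start, and the be64_to_cpu() at the free site is dropped accordingly.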
@@ -1077,7 +1079,7 @@ static void calculate_l2_meta(BlockDriverState *bs,
if (keep_old) {
int i;
for (i = 0; i < nb_clusters; i++) {
- l2_entry = be64_to_cpu(l2_slice[l2_index + i]);
+ l2_entry = get_l2_entry(s, l2_slice, l2_index + i);
if (qcow2_get_cluster_type(bs, l2_entry) != QCOW2_CLUSTER_NORMAL) {
break;
}
@@ -1088,7 +1090,7 @@ static void calculate_l2_meta(BlockDriverState *bs,
}
/* Get the L2 entry of the first cluster */
- l2_entry = be64_to_cpu(l2_slice[l2_index]);
+ l2_entry = get_l2_entry(s, l2_slice, l2_index);
type = qcow2_get_cluster_type(bs, l2_entry);
if (type == QCOW2_CLUSTER_NORMAL && keep_old) {
@@ -1098,7 +1100,7 @@ static void calculate_l2_meta(BlockDriverState *bs,
}
/* Get the L2 entry of the last cluster */
- l2_entry = be64_to_cpu(l2_slice[l2_index + nb_clusters - 1]);
+ l2_entry = get_l2_entry(s, l2_slice, l2_index + nb_clusters - 1);
type = qcow2_get_cluster_type(bs, l2_entry);
if (type == QCOW2_CLUSTER_NORMAL && keep_old) {
@@ -1171,12 +1173,12 @@ static int count_single_write_clusters(BlockDriverState *bs, int nb_clusters,
bool new_alloc)
{
BDRVQcow2State *s = bs->opaque;
- uint64_t l2_entry = be64_to_cpu(l2_slice[l2_index]);
+ uint64_t l2_entry = get_l2_entry(s, l2_slice, l2_index);
uint64_t expected_offset = l2_entry & L2E_OFFSET_MASK;
int i;
for (i = 0; i < nb_clusters; i++) {
- l2_entry = be64_to_cpu(l2_slice[l2_index + i]);
+ l2_entry = get_l2_entry(s, l2_slice, l2_index + i);
if (cluster_needs_new_alloc(bs, l2_entry) != new_alloc) {
break;
}
@@ -1312,7 +1314,7 @@ static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
return ret;
}
- l2_entry = be64_to_cpu(l2_slice[l2_index]);
+ l2_entry = get_l2_entry(s, l2_slice, l2_index);
cluster_offset = l2_entry & L2E_OFFSET_MASK;
if (!cluster_needs_new_alloc(bs, l2_entry)) {
@@ -1689,7 +1691,7 @@ static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset,
for (i = 0; i < nb_clusters; i++) {
uint64_t old_l2_entry;
- old_l2_entry = be64_to_cpu(l2_slice[l2_index + i]);
+ old_l2_entry = get_l2_entry(s, l2_slice, l2_index + i);
/*
* If full_discard is false, make sure that a discarded area reads back
@@ -1729,9 +1731,9 @@ static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset,
/* First remove L2 entries */
qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
if (!full_discard && s->qcow_version >= 3) {
- l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
+ set_l2_entry(s, l2_slice, l2_index + i, QCOW_OFLAG_ZERO);
} else {
- l2_slice[l2_index + i] = cpu_to_be64(0);
+ set_l2_entry(s, l2_slice, l2_index + i, 0);
}
/* Then decrease the refcount */
@@ -1811,7 +1813,7 @@ static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
uint64_t old_offset;
QCow2ClusterType cluster_type;
- old_offset = be64_to_cpu(l2_slice[l2_index + i]);
+ old_offset = get_l2_entry(s, l2_slice, l2_index + i);
/*
* Minimize L2 changes if the cluster already reads back as
@@ -1825,10 +1827,11 @@ static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
if (cluster_type == QCOW2_CLUSTER_COMPRESSED || unmap) {
- l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
+ set_l2_entry(s, l2_slice, l2_index + i, QCOW_OFLAG_ZERO);
qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
} else {
- l2_slice[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
+ uint64_t entry = get_l2_entry(s, l2_slice, l2_index + i);
+ set_l2_entry(s, l2_slice, l2_index + i, entry | QCOW_OFLAG_ZERO);
}
}
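
In zero_in_l2_slice() the in-place |= cpu_to_be64(QCOW_OFLAG_ZERO) cannot be expressed through the accessor, so it becomes an explicit read-modify-write: fetch the entry with get_l2_entry(), OR in QCOW_OFLAG_ZERO in host byte order, and store it back with set_l2_entry(). Because a bytewise OR commutes with byte swapping, the on-disk result is unchanged.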
@@ -1966,7 +1969,7 @@ static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
}
for (j = 0; j < s->l2_slice_size; j++) {
- uint64_t l2_entry = be64_to_cpu(l2_slice[j]);
+ uint64_t l2_entry = get_l2_entry(s, l2_slice, j);
int64_t offset = l2_entry & L2E_OFFSET_MASK;
QCow2ClusterType cluster_type =
qcow2_get_cluster_type(bs, l2_entry);
@@ -1980,7 +1983,7 @@ static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
if (!bs->backing) {
/* not backed; therefore we can simply deallocate the
* cluster */
- l2_slice[j] = 0;
+ set_l2_entry(s, l2_slice, j, 0);
l2_dirty = true;
continue;
}
@@ -2046,9 +2049,9 @@ static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
}
if (l2_refcount == 1) {
- l2_slice[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
+ set_l2_entry(s, l2_slice, j, offset | QCOW_OFLAG_COPIED);
} else {
- l2_slice[j] = cpu_to_be64(offset);
+ set_l2_entry(s, l2_slice, j, offset);
}
l2_dirty = true;
}
diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c
--- a/block/qcow2-refcount.c
+++ b/block/qcow2-refcount.c
@@ -1310,7 +1310,7 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
uint64_t cluster_index;
uint64_t offset;
- entry = be64_to_cpu(l2_slice[j]);
+ entry = get_l2_entry(s, l2_slice, j);
old_entry = entry;
entry &= ~QCOW_OFLAG_COPIED;
offset = entry & L2E_OFFSET_MASK;
@@ -1384,7 +1384,7 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
qcow2_cache_set_dependency(bs, s->l2_table_cache,
s->refcount_block_cache);
}
- l2_slice[j] = cpu_to_be64(entry);
+ set_l2_entry(s, l2_slice, j, entry);
qcow2_cache_entry_mark_dirty(s->l2_table_cache,
l2_slice);
}
@@ -1617,7 +1617,7 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
/* Do the actual checks */
for(i = 0; i < s->l2_size; i++) {
- l2_entry = be64_to_cpu(l2_table[i]);
+ l2_entry = get_l2_entry(s, l2_table, i);
switch (qcow2_get_cluster_type(bs, l2_entry)) {
case QCOW2_CLUSTER_COMPRESSED:
@@ -1686,7 +1686,7 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
QCOW2_OL_INACTIVE_L2;
l2_entry = QCOW_OFLAG_ZERO;
- l2_table[i] = cpu_to_be64(l2_entry);
+ set_l2_entry(s, l2_table, i, l2_entry);
ret = qcow2_pre_write_overlap_check(bs, ign,
l2e_offset, sizeof(uint64_t), false);
if (ret < 0) {
@@ -1914,7 +1914,7 @@ static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
}
for (j = 0; j < s->l2_size; j++) {
- uint64_t l2_entry = be64_to_cpu(l2_table[j]);
+ uint64_t l2_entry = get_l2_entry(s, l2_table, j);
uint64_t data_offset = l2_entry & L2E_OFFSET_MASK;
QCow2ClusterType cluster_type = qcow2_get_cluster_type(bs, l2_entry);
@@ -1937,9 +1937,10 @@ static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
"l2_entry=%" PRIx64 " refcount=%" PRIu64 "\n",
repair ? "Repairing" : "ERROR", l2_entry, refcount);
if (repair) {
- l2_table[j] = cpu_to_be64(refcount == 1
- ? l2_entry | QCOW_OFLAG_COPIED
- : l2_entry & ~QCOW_OFLAG_COPIED);
+ set_l2_entry(s, l2_table, j,
+ refcount == 1 ?
+ l2_entry | QCOW_OFLAG_COPIED :
+ l2_entry & ~QCOW_OFLAG_COPIED);
l2_dirty++;
}
}
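
Taken together these hunks are intended as a pure refactoring: every place that read or wrote a raw big-endian L2 word now goes through the two helpers, and no on-disk bytes change. For readers unfamiliar with the idiom, the following is a minimal standalone sketch, not QEMU code, of what the accessor pair does; glibc's be64toh()/htobe64() stand in for QEMU's be64_to_cpu()/cpu_to_be64(), and a plain array stands in for an L2 slice.

#define _DEFAULT_SOURCE
#include <endian.h>    /* be64toh()/htobe64(), glibc-specific */
#include <stdint.h>
#include <stdio.h>

/* Read an entry: convert from on-disk big-endian to host byte order. */
static uint64_t get_entry(const uint64_t *table, int idx)
{
    return be64toh(table[idx]);
}

/* Write an entry: convert from host byte order to on-disk big-endian. */
static void set_entry(uint64_t *table, int idx, uint64_t entry)
{
    table[idx] = htobe64(entry);
}

int main(void)
{
    uint64_t table[4] = { 0 };

    set_entry(table, 1, 0x10000 | 1);    /* hypothetical offset | flag */
    printf("entry 1 = %#llx\n",
           (unsigned long long)get_entry(table, 1));   /* prints 0x10001 */
    return 0;
}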