[RFC,v2,02/78] block: add fallthrough pseudo-keyword

Message ID e54aa70630d6d524886d7950b84d5bda39c3d605.1697183699.git.manos.pitsidianakis@linaro.org
State New
Series Strict disable implicit fallthrough

Commit Message

Manos Pitsidianakis Oct. 13, 2023, 7:56 a.m. UTC
In preparation for raising -Wimplicit-fallthrough to 5, replace all
fall-through comments with the fallthrough attribute pseudo-keyword.

Signed-off-by: Emmanouil Pitsidianakis <manos.pitsidianakis@linaro.org>
---
 block/block-copy.c    |  1 +
 block/file-posix.c    |  1 +
 block/io.c            |  1 +
 block/iscsi.c         |  1 +
 block/qcow2-cluster.c |  5 ++++-
 block/vhdx.c          | 17 +++++++++++++----
 6 files changed, 21 insertions(+), 5 deletions(-)
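
The pseudo-keyword itself is introduced earlier in this series; as a
sketch, it is a macro along the lines of the Linux kernel's definition
(the exact QEMU hunk may differ in detail):

    #if __has_attribute(__fallthrough__)
    # define fallthrough __attribute__((__fallthrough__))
    #else
    # define fallthrough do {} while (0) /* fallthrough */
    #endif

With -Wimplicit-fallthrough=5, only the attribute form silences the
warning; fall-through comments are no longer recognized by the compiler.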

Comments

Eric Blake Oct. 17, 2023, 10:11 p.m. UTC | #1
On Fri, Oct 13, 2023 at 10:56:29AM +0300, Emmanouil Pitsidianakis wrote:
> In preparation for raising -Wimplicit-fallthrough to 5, replace all
> fall-through comments with the fallthrough attribute pseudo-keyword.
> 
> Signed-off-by: Emmanouil Pitsidianakis <manos.pitsidianakis@linaro.org>
> ---
>  block/block-copy.c    |  1 +
>  block/file-posix.c    |  1 +
>  block/io.c            |  1 +
>  block/iscsi.c         |  1 +
>  block/qcow2-cluster.c |  5 ++++-
>  block/vhdx.c          | 17 +++++++++++++----
>  6 files changed, 21 insertions(+), 5 deletions(-)
> 
> diff --git a/block/block-copy.c b/block/block-copy.c
> index 1c60368d72..b4ceb6a079 100644
> --- a/block/block-copy.c
> +++ b/block/block-copy.c
...
>      case COPY_RANGE_FULL:
>          ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
>                                   0, s->write_flags);
>          if (ret >= 0) {
>              /* Successful copy-range, increase chunk size.  */
>              *method = COPY_RANGE_FULL;
>              return 0;
>          }
>  
>          trace_block_copy_copy_range_fail(s, offset, ret);
>          *method = COPY_READ_WRITE;
>          /* Fall through to read+write with allocated buffer */
> +        fallthrough;
>  
>      case COPY_READ_WRITE_CLUSTER:
>      case COPY_READ_WRITE:

I like how you kept the comments.

> +++ b/block/qcow2-cluster.c
> @@ -1327,36 +1327,39 @@ static int coroutine_fn calculate_l2_meta(BlockDriverState *bs,
>  /*
>   * Returns true if writing to the cluster pointed to by @l2_entry
>   * requires a new allocation (that is, if the cluster is unallocated
>   * or has refcount > 1 and therefore cannot be written in-place).
>   */
>  static bool cluster_needs_new_alloc(BlockDriverState *bs, uint64_t l2_entry)
>  {
>      switch (qcow2_get_cluster_type(bs, l2_entry)) {
>      case QCOW2_CLUSTER_NORMAL:
> +        fallthrough;
>      case QCOW2_CLUSTER_ZERO_ALLOC:

Why is this one needed?  It looks like two case labels for the same
code are okay; the fallthrough attribute is only needed once a case
label is no longer empty.
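
For illustration (hypothetical names throughout): gcc only warns when a
statement precedes the next case label, so stacked empty labels need no
annotation even at -Wimplicit-fallthrough=5:

    /* A, B, C and the helpers are made up for illustration. */
    enum { A, B, C };
    void do_b(void);
    void do_c(void);

    void example(int x)
    {
        switch (x) {
        case A:          /* empty label: no warning, nothing needed */
        case B:
            do_b();
            fallthrough; /* statement before next label: required */
        case C:
            do_c();
            break;
        }
    }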

>          if (l2_entry & QCOW_OFLAG_COPIED) {
>              return false;
>          }
> -        /* fallthrough */
> +        fallthrough;

This one makes sense.

>      case QCOW2_CLUSTER_UNALLOCATED:
> +        fallthrough;
>      case QCOW2_CLUSTER_COMPRESSED:
> +        fallthrough;

These two also look spurious.

>      case QCOW2_CLUSTER_ZERO_PLAIN:
>          return true;
>      default:
>          abort();
>      }
>  }
...
> +++ b/block/vhdx.c
> @@ -1176,60 +1176,65 @@ static int coroutine_fn GRAPH_RDLOCK
>  vhdx_co_readv(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
>                QEMUIOVector *qiov)
...
>              /* check the payload block state */
>              switch (s->bat[sinfo.bat_idx] & VHDX_BAT_STATE_BIT_MASK) {
> -            case PAYLOAD_BLOCK_NOT_PRESENT: /* fall through */
> +            case PAYLOAD_BLOCK_NOT_PRESENT:
> +                fallthrough;
>              case PAYLOAD_BLOCK_UNDEFINED:
> +                fallthrough;
>              case PAYLOAD_BLOCK_UNMAPPED:
> +                fallthrough;
>              case PAYLOAD_BLOCK_UNMAPPED_v095:
> +                fallthrough;

All four of these look spurious; the old comment is also spurious,
though, so I'd be happy with deleting it without replacement.

>              case PAYLOAD_BLOCK_ZERO:
>                  /* return zero */
>                  qemu_iovec_memset(&hd_qiov, 0, 0, sinfo.bytes_avail);
>                  break;
>              case PAYLOAD_BLOCK_FULLY_PRESENT:
>                  qemu_co_mutex_unlock(&s->lock);
>                  ret = bdrv_co_preadv(bs->file, sinfo.file_offset,
>                                       sinfo.sectors_avail * BDRV_SECTOR_SIZE,
>                                       &hd_qiov, 0);
>                  qemu_co_mutex_lock(&s->lock);
>                  if (ret < 0) {
>                      goto exit;
>                  }
>                  break;
>              case PAYLOAD_BLOCK_PARTIALLY_PRESENT:
>                  /* we don't yet support difference files, fall through
>                   * to error */
> +                fallthrough;
>              default:

But keeping this one because of the comment is reasonable.

...
>              switch (bat_state) {
>              case PAYLOAD_BLOCK_ZERO:
>                  /* in this case, we need to preserve zero writes for
>                   * data that is not part of this write, so we must pad
>                   * the rest of the buffer to zeroes */
>                  use_zero_buffers = true;
> -                /* fall through */
> -            case PAYLOAD_BLOCK_NOT_PRESENT: /* fall through */
> +                fallthrough;
> +            case PAYLOAD_BLOCK_NOT_PRESENT:

This one is necessary;

> +                fallthrough;
>              case PAYLOAD_BLOCK_UNMAPPED:
> +                fallthrough;
>              case PAYLOAD_BLOCK_UNMAPPED_v095:
> +                fallthrough;
>              case PAYLOAD_BLOCK_UNDEFINED:

but these three seem spurious.

I like the direction this is headed in, but I've pointed out enough
issues that I'll withhold R-b on this version.

Manos Pitsidianakis Oct. 18, 2023, 5:50 a.m. UTC | #2
On Wed, 18 Oct 2023 01:11, Eric Blake <eblake@redhat.com> wrote:
>>  static bool cluster_needs_new_alloc(BlockDriverState *bs, uint64_t l2_entry)
>>  {
>>      switch (qcow2_get_cluster_type(bs, l2_entry)) {
>>      case QCOW2_CLUSTER_NORMAL:
>> +        fallthrough;
>>      case QCOW2_CLUSTER_ZERO_ALLOC:
>
>Why is this one needed?  It looks like two case labels for the same
>code are okay; the fallthrough attribute is only needed once a case
>label is no longer empty.
> ...
>These two also look spurious.
>
> ...
>but these three seem spurious.

Thanks for pointing it out, Eric. Indeed these are mistakes.

By the way, there's a newer version posted [0] because, as you noticed,
I accidentally left --function-context on git-format-patch and didn't
notice that some of the diffs blew up in size.
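
For reference, --function-context (-W) makes git extend every hunk to
cover the whole enclosing function, which is why one-line changes in
the diff below appear as whole-function hunks. A hypothetical
invocation, with a made-up branch name:

    git format-patch -W -v2 master..fallthrough-work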

Also, the consensus in v1 was to reject this patch series in general 
[1].

[0]: <cover.1697186560.git.manos.pitsidianakis@linaro.org> 
https://lore.kernel.org/qemu-devel/cover.1697186560.git.manos.pitsidianakis@linaro.org/

[1]: 
<CAFEAcA_fLBe9CuWFYpeuejj8dcerhFtPNX+iVaVFvH4SXx1oAg@mail.gmail.com>
https://lore.kernel.org/qemu-devel/CAFEAcA_fLBe9CuWFYpeuejj8dcerhFtPNX+iVaVFvH4SXx1oAg@mail.gmail.com/


--
Manos

Patch

diff --git a/block/block-copy.c b/block/block-copy.c
index 1c60368d72..b4ceb6a079 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -473,78 +473,79 @@  static int coroutine_fn GRAPH_RDLOCK
 block_copy_do_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
                    BlockCopyMethod *method, bool *error_is_read)
 {
     int ret;
     int64_t nbytes = MIN(offset + bytes, s->len) - offset;
     void *bounce_buffer = NULL;
 
     assert(offset >= 0 && bytes > 0 && INT64_MAX - offset >= bytes);
     assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
     assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
     assert(offset < s->len);
     assert(offset + bytes <= s->len ||
            offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
     assert(nbytes < INT_MAX);
 
     switch (*method) {
     case COPY_WRITE_ZEROES:
         ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
                                     ~BDRV_REQ_WRITE_COMPRESSED);
         if (ret < 0) {
             trace_block_copy_write_zeroes_fail(s, offset, ret);
             *error_is_read = false;
         }
         return ret;
 
     case COPY_RANGE_SMALL:
     case COPY_RANGE_FULL:
         ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
                                  0, s->write_flags);
         if (ret >= 0) {
             /* Successful copy-range, increase chunk size.  */
             *method = COPY_RANGE_FULL;
             return 0;
         }
 
         trace_block_copy_copy_range_fail(s, offset, ret);
         *method = COPY_READ_WRITE;
         /* Fall through to read+write with allocated buffer */
+        fallthrough;
 
     case COPY_READ_WRITE_CLUSTER:
     case COPY_READ_WRITE:
         /*
          * In case of failed copy_range request above, we may proceed with
          * buffered request larger than BLOCK_COPY_MAX_BUFFER.
          * Still, further requests will be properly limited, so don't care too
          * much. Moreover the most likely case (copy_range is unsupported for
          * the configuration, so the very first copy_range request fails)
          * is handled by setting large copy_size only after first successful
          * copy_range.
          */
 
         bounce_buffer = qemu_blockalign(s->source->bs, nbytes);
 
         ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0);
         if (ret < 0) {
             trace_block_copy_read_fail(s, offset, ret);
             *error_is_read = true;
             goto out;
         }
 
         ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer,
                              s->write_flags);
         if (ret < 0) {
             trace_block_copy_write_fail(s, offset, ret);
             *error_is_read = false;
             goto out;
         }
 
     out:
         qemu_vfree(bounce_buffer);
         break;
 
     default:
         abort();
     }
 
     return ret;
 }
diff --git a/block/file-posix.c b/block/file-posix.c
index 50e2b20d5c..31c7719da5 100644
--- a/block/file-posix.c
+++ b/block/file-posix.c
@@ -972,69 +972,70 @@  static int raw_check_lock_bytes(int fd, uint64_t perm, uint64_t shared_perm,
 static int raw_handle_perm_lock(BlockDriverState *bs,
                                 RawPermLockOp op,
                                 uint64_t new_perm, uint64_t new_shared,
                                 Error **errp)
 {
     BDRVRawState *s = bs->opaque;
     int ret = 0;
     Error *local_err = NULL;
 
     if (!s->use_lock) {
         return 0;
     }
 
     if (bdrv_get_flags(bs) & BDRV_O_INACTIVE) {
         return 0;
     }
 
     switch (op) {
     case RAW_PL_PREPARE:
         if ((s->perm | new_perm) == s->perm &&
             (s->shared_perm & new_shared) == s->shared_perm)
         {
             /*
              * We are going to unlock bytes, it should not fail. If it fail due
              * to some fs-dependent permission-unrelated reasons (which occurs
              * sometimes on NFS and leads to abort in bdrv_replace_child) we
              * can't prevent such errors by any check here. And we ignore them
              * anyway in ABORT and COMMIT.
              */
             return 0;
         }
         ret = raw_apply_lock_bytes(s, s->fd, s->perm | new_perm,
                                    ~s->shared_perm | ~new_shared,
                                    false, errp);
         if (!ret) {
             ret = raw_check_lock_bytes(s->fd, new_perm, new_shared, errp);
             if (!ret) {
                 return 0;
             }
             error_append_hint(errp,
                               "Is another process using the image [%s]?\n",
                               bs->filename);
         }
         /* fall through to unlock bytes. */
+        fallthrough;
     case RAW_PL_ABORT:
         raw_apply_lock_bytes(s, s->fd, s->perm, ~s->shared_perm,
                              true, &local_err);
         if (local_err) {
             /* Theoretically the above call only unlocks bytes and it cannot
              * fail. Something weird happened, report it.
              */
             warn_report_err(local_err);
         }
         break;
     case RAW_PL_COMMIT:
         raw_apply_lock_bytes(s, s->fd, new_perm, ~new_shared,
                              true, &local_err);
         if (local_err) {
             /* Theoretically the above call only unlocks bytes and it cannot
              * fail. Something weird happened, report it.
              */
             warn_report_err(local_err);
         }
         break;
     }
     return ret;
 }
 
 /* Sets a specific flag */
diff --git a/block/io.c b/block/io.c
index e7f9448d5a..cc05457d02 100644
--- a/block/io.c
+++ b/block/io.c
@@ -2007,43 +2007,44 @@  static inline void coroutine_fn
 bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
                          BdrvTrackedRequest *req, int ret)
 {
     int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
     BlockDriverState *bs = child->bs;
 
     bdrv_check_request(offset, bytes, &error_abort);
 
     qatomic_inc(&bs->write_gen);
 
     /*
      * Discard cannot extend the image, but in error handling cases, such as
      * when reverting a qcow2 cluster allocation, the discarded range can pass
      * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD
      * here. Instead, just skip it, since semantically a discard request
      * beyond EOF cannot expand the image anyway.
      */
     if (ret == 0 &&
         (req->type == BDRV_TRACKED_TRUNCATE ||
          end_sector > bs->total_sectors) &&
         req->type != BDRV_TRACKED_DISCARD) {
         bs->total_sectors = end_sector;
         bdrv_parent_cb_resize(bs);
         bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
     }
     if (req->bytes) {
         switch (req->type) {
         case BDRV_TRACKED_WRITE:
             stat64_max(&bs->wr_highest_offset, offset + bytes);
             /* fall through, to set dirty bits */
+            fallthrough;
         case BDRV_TRACKED_DISCARD:
             bdrv_set_dirty(bs, offset, bytes);
             break;
         default:
             break;
         }
     }
 }
 
 /*
  * Forwards an already correctly aligned write request to the BlockDriver,
  * after possibly fragmenting it.
  */
diff --git a/block/iscsi.c b/block/iscsi.c
index 5640c8b565..2fb7037748 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -1430,69 +1430,70 @@  static void iscsi_nop_timed_event(void *opaque)
 static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
 {
     struct scsi_task *task = NULL;
     struct scsi_readcapacity10 *rc10 = NULL;
     struct scsi_readcapacity16 *rc16 = NULL;
     int retries = ISCSI_CMD_RETRIES; 
 
     do {
         if (task != NULL) {
             scsi_free_scsi_task(task);
             task = NULL;
         }
 
         switch (iscsilun->type) {
         case TYPE_DISK:
             task = iscsi_readcapacity16_sync(iscsilun->iscsi, iscsilun->lun);
             if (task != NULL && task->status == SCSI_STATUS_GOOD) {
                 rc16 = scsi_datain_unmarshall(task);
                 if (rc16 == NULL) {
                     error_setg(errp, "iSCSI: Failed to unmarshall readcapacity16 data.");
                 } else {
                     iscsilun->block_size = rc16->block_length;
                     iscsilun->num_blocks = rc16->returned_lba + 1;
                     iscsilun->lbpme = !!rc16->lbpme;
                     iscsilun->lbprz = !!rc16->lbprz;
                     iscsilun->use_16_for_rw = (rc16->returned_lba > 0xffffffff);
                 }
                 break;
             }
             if (task != NULL && task->status == SCSI_STATUS_CHECK_CONDITION
                 && task->sense.key == SCSI_SENSE_UNIT_ATTENTION) {
                 break;
             }
             /* Fall through and try READ CAPACITY(10) instead.  */
+            fallthrough;
         case TYPE_ROM:
             task = iscsi_readcapacity10_sync(iscsilun->iscsi, iscsilun->lun, 0, 0);
             if (task != NULL && task->status == SCSI_STATUS_GOOD) {
                 rc10 = scsi_datain_unmarshall(task);
                 if (rc10 == NULL) {
                     error_setg(errp, "iSCSI: Failed to unmarshall readcapacity10 data.");
                 } else {
                     iscsilun->block_size = rc10->block_size;
                     if (rc10->lba == 0) {
                         /* blank disk loaded */
                         iscsilun->num_blocks = 0;
                     } else {
                         iscsilun->num_blocks = rc10->lba + 1;
                     }
                 }
             }
             break;
         default:
             return;
         }
     } while (task != NULL && task->status == SCSI_STATUS_CHECK_CONDITION
              && task->sense.key == SCSI_SENSE_UNIT_ATTENTION
              && retries-- > 0);
 
     if (task == NULL || task->status != SCSI_STATUS_GOOD) {
         error_setg(errp, "iSCSI: failed to send readcapacity10/16 command");
     } else if (!iscsilun->block_size ||
                iscsilun->block_size % BDRV_SECTOR_SIZE) {
         error_setg(errp, "iSCSI: the target returned an invalid "
                    "block size of %d.", iscsilun->block_size);
     }
     if (task) {
         scsi_free_scsi_task(task);
     }
 }
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index f4f6cd6ad0..c50143d493 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -1327,36 +1327,39 @@  static int coroutine_fn calculate_l2_meta(BlockDriverState *bs,
 /*
  * Returns true if writing to the cluster pointed to by @l2_entry
  * requires a new allocation (that is, if the cluster is unallocated
  * or has refcount > 1 and therefore cannot be written in-place).
  */
 static bool cluster_needs_new_alloc(BlockDriverState *bs, uint64_t l2_entry)
 {
     switch (qcow2_get_cluster_type(bs, l2_entry)) {
     case QCOW2_CLUSTER_NORMAL:
+        fallthrough;
     case QCOW2_CLUSTER_ZERO_ALLOC:
         if (l2_entry & QCOW_OFLAG_COPIED) {
             return false;
         }
-        /* fallthrough */
+        fallthrough;
     case QCOW2_CLUSTER_UNALLOCATED:
+        fallthrough;
     case QCOW2_CLUSTER_COMPRESSED:
+        fallthrough;
     case QCOW2_CLUSTER_ZERO_PLAIN:
         return true;
     default:
         abort();
     }
 }
 
 /*
  * Returns the number of contiguous clusters that can be written to
  * using one single write request, starting from @l2_index.
  * At most @nb_clusters are checked.
  *
  * If @new_alloc is true this counts clusters that are either
  * unallocated, or allocated but with refcount > 1 (so they need to be
  * newly allocated and COWed).
  *
  * If @new_alloc is false this counts clusters that are already
  * allocated and can be overwritten in-place (this includes clusters
  * of type QCOW2_CLUSTER_ZERO_ALLOC).
  */
diff --git a/block/vhdx.c b/block/vhdx.c
index a67edcc03e..9000b3fcea 100644
--- a/block/vhdx.c
+++ b/block/vhdx.c
@@ -1176,60 +1176,65 @@  static int coroutine_fn GRAPH_RDLOCK
 vhdx_co_readv(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
               QEMUIOVector *qiov)
 {
     BDRVVHDXState *s = bs->opaque;
     int ret = 0;
     VHDXSectorInfo sinfo;
     uint64_t bytes_done = 0;
     QEMUIOVector hd_qiov;
 
     qemu_iovec_init(&hd_qiov, qiov->niov);
 
     qemu_co_mutex_lock(&s->lock);
 
     while (nb_sectors > 0) {
         /* We are a differencing file, so we need to inspect the sector bitmap
          * to see if we have the data or not */
         if (s->params.data_bits & VHDX_PARAMS_HAS_PARENT) {
             /* not supported yet */
             ret = -ENOTSUP;
             goto exit;
         } else {
             vhdx_block_translate(s, sector_num, nb_sectors, &sinfo);
 
             qemu_iovec_reset(&hd_qiov);
             qemu_iovec_concat(&hd_qiov, qiov,  bytes_done, sinfo.bytes_avail);
 
             /* check the payload block state */
             switch (s->bat[sinfo.bat_idx] & VHDX_BAT_STATE_BIT_MASK) {
-            case PAYLOAD_BLOCK_NOT_PRESENT: /* fall through */
+            case PAYLOAD_BLOCK_NOT_PRESENT:
+                fallthrough;
             case PAYLOAD_BLOCK_UNDEFINED:
+                fallthrough;
             case PAYLOAD_BLOCK_UNMAPPED:
+                fallthrough;
             case PAYLOAD_BLOCK_UNMAPPED_v095:
+                fallthrough;
             case PAYLOAD_BLOCK_ZERO:
                 /* return zero */
                 qemu_iovec_memset(&hd_qiov, 0, 0, sinfo.bytes_avail);
                 break;
             case PAYLOAD_BLOCK_FULLY_PRESENT:
                 qemu_co_mutex_unlock(&s->lock);
                 ret = bdrv_co_preadv(bs->file, sinfo.file_offset,
                                      sinfo.sectors_avail * BDRV_SECTOR_SIZE,
                                      &hd_qiov, 0);
                 qemu_co_mutex_lock(&s->lock);
                 if (ret < 0) {
                     goto exit;
                 }
                 break;
             case PAYLOAD_BLOCK_PARTIALLY_PRESENT:
                 /* we don't yet support difference files, fall through
                  * to error */
+                fallthrough;
             default:
                 ret = -EIO;
                 goto exit;
                 break;
             }
             nb_sectors -= sinfo.sectors_avail;
             sector_num += sinfo.sectors_avail;
             bytes_done += sinfo.bytes_avail;
         }
     }
     ret = 0;
@@ -1330,155 +1335,159 @@  static int coroutine_fn GRAPH_RDLOCK
 vhdx_co_writev(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
                QEMUIOVector *qiov, int flags)
 {
     int ret = -ENOTSUP;
     BDRVVHDXState *s = bs->opaque;
     VHDXSectorInfo sinfo;
     uint64_t bytes_done = 0;
     uint64_t bat_entry = 0;
     uint64_t bat_entry_offset = 0;
     QEMUIOVector hd_qiov;
     struct iovec iov1 = { 0 };
     struct iovec iov2 = { 0 };
     int sectors_to_write;
     int bat_state;
     uint64_t bat_prior_offset = 0;
     bool bat_update = false;
 
     qemu_iovec_init(&hd_qiov, qiov->niov);
 
     qemu_co_mutex_lock(&s->lock);
 
     ret = vhdx_user_visible_write(bs, s);
     if (ret < 0) {
         goto exit;
     }
 
     while (nb_sectors > 0) {
         bool use_zero_buffers = false;
         bat_update = false;
         if (s->params.data_bits & VHDX_PARAMS_HAS_PARENT) {
             /* not supported yet */
             ret = -ENOTSUP;
             goto exit;
         } else {
             vhdx_block_translate(s, sector_num, nb_sectors, &sinfo);
             sectors_to_write = sinfo.sectors_avail;
 
             qemu_iovec_reset(&hd_qiov);
             /* check the payload block state */
             bat_state = s->bat[sinfo.bat_idx] & VHDX_BAT_STATE_BIT_MASK;
             switch (bat_state) {
             case PAYLOAD_BLOCK_ZERO:
                 /* in this case, we need to preserve zero writes for
                  * data that is not part of this write, so we must pad
                  * the rest of the buffer to zeroes */
                 use_zero_buffers = true;
-                /* fall through */
-            case PAYLOAD_BLOCK_NOT_PRESENT: /* fall through */
+                fallthrough;
+            case PAYLOAD_BLOCK_NOT_PRESENT:
+                fallthrough;
             case PAYLOAD_BLOCK_UNMAPPED:
+                fallthrough;
             case PAYLOAD_BLOCK_UNMAPPED_v095:
+                fallthrough;
             case PAYLOAD_BLOCK_UNDEFINED:
                 bat_prior_offset = sinfo.file_offset;
                 ret = vhdx_allocate_block(bs, s, &sinfo.file_offset,
                                           &use_zero_buffers);
                 if (ret < 0) {
                     goto exit;
                 }
                 /*
                  * once we support differencing files, this may also be
                  * partially present
                  */
                 /* update block state to the newly specified state */
                 vhdx_update_bat_table_entry(bs, s, &sinfo, &bat_entry,
                                             &bat_entry_offset,
                                             PAYLOAD_BLOCK_FULLY_PRESENT);
                 bat_update = true;
                 /*
                  * Since we just allocated a block, file_offset is the
                  * beginning of the payload block. It needs to be the
                  * write address, which includes the offset into the
                  * block, unless the entire block needs to read as
                  * zeroes but truncation was not able to provide them,
                  * in which case we need to fill in the rest.
                  */
                 if (!use_zero_buffers) {
                     sinfo.file_offset += sinfo.block_offset;
                 } else {
                     /* zero fill the front, if any */
                     if (sinfo.block_offset) {
                         iov1.iov_len = sinfo.block_offset;
                         iov1.iov_base = qemu_blockalign(bs, iov1.iov_len);
                         memset(iov1.iov_base, 0, iov1.iov_len);
                         qemu_iovec_concat_iov(&hd_qiov, &iov1, 1, 0,
                                               iov1.iov_len);
                         sectors_to_write += iov1.iov_len >> BDRV_SECTOR_BITS;
                     }
 
                     /* our actual data */
                     qemu_iovec_concat(&hd_qiov, qiov, bytes_done,
                                       sinfo.bytes_avail);
 
                     /* zero fill the back, if any */
                     if ((sinfo.bytes_avail - sinfo.block_offset) <
                          s->block_size) {
                         iov2.iov_len = s->block_size -
                                       (sinfo.bytes_avail + sinfo.block_offset);
                         iov2.iov_base = qemu_blockalign(bs, iov2.iov_len);
                         memset(iov2.iov_base, 0, iov2.iov_len);
                         qemu_iovec_concat_iov(&hd_qiov, &iov2, 1, 0,
                                               iov2.iov_len);
                         sectors_to_write += iov2.iov_len >> BDRV_SECTOR_BITS;
                     }
                 }
 
-                /* fall through */
+                fallthrough;
             case PAYLOAD_BLOCK_FULLY_PRESENT:
                 /* if the file offset address is in the header zone,
                  * there is a problem */
                 if (sinfo.file_offset < (1 * MiB)) {
                     ret = -EFAULT;
                     goto error_bat_restore;
                 }
 
                 if (!use_zero_buffers) {
                     qemu_iovec_concat(&hd_qiov, qiov,  bytes_done,
                                       sinfo.bytes_avail);
                 }
                 /* block exists, so we can just overwrite it */
                 qemu_co_mutex_unlock(&s->lock);
                 ret = bdrv_co_pwritev(bs->file, sinfo.file_offset,
                                       sectors_to_write * BDRV_SECTOR_SIZE,
                                       &hd_qiov, 0);
                 qemu_co_mutex_lock(&s->lock);
                 if (ret < 0) {
                     goto error_bat_restore;
                 }
                 break;
             case PAYLOAD_BLOCK_PARTIALLY_PRESENT:
                 /* we don't yet support difference files, fall through
                  * to error */
+                fallthrough;
             default:
                 ret = -EIO;
                 goto exit;
                 break;
             }
 
             if (bat_update) {
                 /* this will update the BAT entry into the log journal, and
                  * then flush the log journal out to disk */
                 ret =  vhdx_log_write_and_flush(bs, s, &bat_entry,
                                                 sizeof(VHDXBatEntry),
                                                 bat_entry_offset);
                 if (ret < 0) {
                     goto exit;
                 }
             }
 
             nb_sectors -= sinfo.sectors_avail;
             sector_num += sinfo.sectors_avail;
             bytes_done += sinfo.bytes_avail;
 
         }
     }
 
     goto exit;