[ovs-dev] dpif: fix memory leak of pmd_list after usage

Message ID 20210812151134.3113110-1-harry.van.haaren@intel.com
State Superseded
Series [ovs-dev] dpif: fix memory leak of pmd_list after usage

Checks

Context Check Description
ovsrobot/apply-robot success apply and check: success
ovsrobot/github-robot-_Build_and_Test success github build: passed

Commit Message

Van Haaren, Harry Aug. 12, 2021, 3:11 p.m. UTC
This commit fixes a memory leak when a pmd_list is retrieved
from the sorted_poll_thread_list() function. Inside the function,
the pmd list is allocated, but it was not freed once no longer
required for the command functionality. These leaks were found
by a static analysis tool.

Fixes: 3d8f47bc04 ("dpif-netdev: Add command line and function pointer for miniflow extract")
Fixes: abb807e27d ("dpif-netdev: Add command to switch dpif implementation.")
Fixes: 3d018c3ea7 ("dpif-netdev: Add subtable lookup prio set command.")

Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com>

---

 lib/dpif-netdev.c | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)
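
For context, sorted_poll_thread_list() follows the usual callee-allocates,
caller-frees contract. A minimal sketch of the intended caller pattern (a
fragment only, not the actual OVS code; locking and the per-command work are
elided):

    size_t n;
    struct dp_netdev_pmd_thread **pmd_list;

    /* The callee allocates and fills the array of PMD thread pointers. */
    sorted_poll_thread_list(dp, &pmd_list, &n);

    for (size_t i = 0; i < n; i++) {
        /* ... use pmd_list[i] for the command's work ... */
    }

    /* The caller owns the array and must free it; this is the step the
     * patch adds to each unixctl command handler. */
    free(pmd_list);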

Comments

Ilya Maximets Aug. 12, 2021, 3:27 p.m. UTC | #1
On 8/12/21 5:11 PM, Harry van Haaren wrote:
> This commit fixes a memory leak when a pmd_list is retrieved
> from the sorted_poll_thread_list() function. Inside the function,
> the pmd list is allocated, but it was not freed once no longer
> required for the command functionality. These leaks were found
> by a static analysis tool.

Hmm.  Thanks for spotting this!

Could you, please, add unit tests for these functions?  Something very
basic will work.  Even if these functions will not do anything useful,
e.g. changing priority of the only implementation, asan should spot the
leak and fail the CI job that we have.

> 
> Fixes: 3d8f47bc04 ("dpif-netdev: Add command line and function pointer for miniflow extract")
> Fixes: abb807e27d ("dpif-netdev: Add command to switch dpif implementation.")
> Fixes: 3d018c3ea7 ("dpif-netdev: Add subtable lookup prio set command.")

Please, split the change for the last one to the separate patch, so
it can be backported to earlier branches.

> 
> Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com>
> 
> ---
> 
>  lib/dpif-netdev.c | 18 +++++++++++++-----
>  1 file changed, 13 insertions(+), 5 deletions(-)

The patch name should have a 'dpif-netdev' area prefix.

> 
> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> index 03f460c7d1..99779bb402 100644
> --- a/lib/dpif-netdev.c
> +++ b/lib/dpif-netdev.c
> @@ -960,12 +960,13 @@ dpif_netdev_subtable_lookup_set(struct unixctl_conn *conn, int argc OVS_UNUSED,
>      }
>  
>      ovs_mutex_lock(&dp_netdev_mutex);
> +
> +    struct dp_netdev_pmd_thread **pmd_list = NULL;
>      SHASH_FOR_EACH (node, &dp_netdevs) {
>          struct dp_netdev *dp = node->data;
>  
>          /* Get PMD threads list, required to get DPCLS instances. */
>          size_t n;
> -        struct dp_netdev_pmd_thread **pmd_list;
>          sorted_poll_thread_list(dp, &pmd_list, &n);

This is called in a loop, so we're leaking the array on every
iteration.  Same for other parts of the patch.
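
In other words, with pmd_list hoisted out of SHASH_FOR_EACH and freed only
once after the loop, every iteration but the last still leaks the array
returned by sorted_poll_thread_list().  A minimal sketch of a leak-free
shape (a fragment only; the per-datapath work is elided):

    struct dp_netdev_pmd_thread **pmd_list = NULL;

    SHASH_FOR_EACH (node, &dp_netdevs) {
        struct dp_netdev *dp = node->data;
        size_t n;

        sorted_poll_thread_list(dp, &pmd_list, &n);
        /* ... per-datapath work using pmd_list[0..n-1] ... */

        /* Free once per iteration, not just once after the loop. */
        free(pmd_list);
        pmd_list = NULL;
    }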

>  
>          /* take port mutex as HMAP iters over them. */
> @@ -996,6 +997,7 @@ dpif_netdev_subtable_lookup_set(struct unixctl_conn *conn, int argc OVS_UNUSED,
>          ovs_mutex_unlock(&dp->port_mutex);
>      }
>      ovs_mutex_unlock(&dp_netdev_mutex);
> +    free(pmd_list);
>  
>      struct ds reply = DS_EMPTY_INITIALIZER;
>      ds_put_format(&reply,
> @@ -1013,10 +1015,10 @@ dpif_netdev_impl_get(struct unixctl_conn *conn, int argc OVS_UNUSED,
>  {
>      struct ds reply = DS_EMPTY_INITIALIZER;
>      struct shash_node *node;
> +    struct dp_netdev_pmd_thread **pmd_list = NULL;
>  
>      ovs_mutex_lock(&dp_netdev_mutex);
>      SHASH_FOR_EACH (node, &dp_netdevs) {
> -        struct dp_netdev_pmd_thread **pmd_list;
>          struct dp_netdev *dp = node->data;
>          size_t n;
>  
> @@ -1026,6 +1028,8 @@ dpif_netdev_impl_get(struct unixctl_conn *conn, int argc OVS_UNUSED,
>          dp_netdev_impl_get(&reply, pmd_list, n);
>      }
>      ovs_mutex_unlock(&dp_netdev_mutex);
> +    free(pmd_list);
> +
>      unixctl_command_reply(conn, ds_cstr(&reply));
>      ds_destroy(&reply);
>  }
> @@ -1058,12 +1062,12 @@ dpif_netdev_impl_set(struct unixctl_conn *conn, int argc OVS_UNUSED,
>          return;
>      }
>  
> +    struct dp_netdev_pmd_thread **pmd_list = NULL;
>      SHASH_FOR_EACH (node, &dp_netdevs) {
>          struct dp_netdev *dp = node->data;
>  
>          /* Get PMD threads list, required to get DPCLS instances. */
>          size_t n;
> -        struct dp_netdev_pmd_thread **pmd_list;
>          sorted_poll_thread_list(dp, &pmd_list, &n);
>  
>          for (size_t i = 0; i < n; i++) {
> @@ -1080,6 +1084,7 @@ dpif_netdev_impl_set(struct unixctl_conn *conn, int argc OVS_UNUSED,
>          };
>      }
>      ovs_mutex_unlock(&dp_netdev_mutex);
> +    free(pmd_list);
>  
>      /* Reply with success to command. */
>      struct ds reply = DS_EMPTY_INITIALIZER;
> @@ -1099,8 +1104,8 @@ dpif_miniflow_extract_impl_get(struct unixctl_conn *conn, int argc OVS_UNUSED,
>      struct shash_node *node;
>  
>      ovs_mutex_lock(&dp_netdev_mutex);
> +    struct dp_netdev_pmd_thread **pmd_list = NULL;
>      SHASH_FOR_EACH (node, &dp_netdevs) {
> -        struct dp_netdev_pmd_thread **pmd_list;
>          struct dp_netdev *dp = node->data;
>          size_t n;
>  
> @@ -1110,6 +1115,8 @@ dpif_miniflow_extract_impl_get(struct unixctl_conn *conn, int argc OVS_UNUSED,
>          dp_mfex_impl_get(&reply, pmd_list, n);
>      }
>      ovs_mutex_unlock(&dp_netdev_mutex);
> +    free(pmd_list);
> +
>      unixctl_command_reply(conn, ds_cstr(&reply));
>      ds_destroy(&reply);
>  }
> @@ -1243,8 +1250,8 @@ dpif_miniflow_extract_impl_set(struct unixctl_conn *conn, int argc,
>       */
>      ovs_mutex_lock(&dp_netdev_mutex);
>  
> +    struct dp_netdev_pmd_thread **pmd_list = NULL;
>      SHASH_FOR_EACH (node, &dp_netdevs) {
> -        struct dp_netdev_pmd_thread **pmd_list;
>          struct dp_netdev *dp = node->data;
>          size_t n;
>  
> @@ -1269,6 +1276,7 @@ dpif_miniflow_extract_impl_set(struct unixctl_conn *conn, int argc,
>      }
>  
>      ovs_mutex_unlock(&dp_netdev_mutex);
> +    free(pmd_list);
>  
>      /* If PMD thread was specified, but it wasn't found, return error. */
>      if (pmd_thread_to_change != NON_PMD_CORE_ID && !pmd_thread_update_done) {
>
Kevin Traynor Aug. 12, 2021, 3:57 p.m. UTC | #2
On 12/08/2021 16:11, Harry van Haaren wrote:
> This commit fixes a memory leak when a pmd_list is retrieved
> from the sorted_poll_thread_list() function. Inside the function,
> the pmd list is allocated, but it was not freed once no longer
> required for the command functionality. These leaks were found
> by a static analysis tool.
> 
> Fixes: 3d8f47bc04 ("dpif-netdev: Add command line and function pointer for miniflow extract")
> Fixes: abb807e27d ("dpif-netdev: Add command to switch dpif implementation.")
> Fixes: 3d018c3ea7 ("dpif-netdev: Add subtable lookup prio set command.")
> 
> Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com>
> 
> ---
> 
>  lib/dpif-netdev.c | 18 +++++++++++++-----
>  1 file changed, 13 insertions(+), 5 deletions(-)
> 
> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> index 03f460c7d1..99779bb402 100644
> --- a/lib/dpif-netdev.c
> +++ b/lib/dpif-netdev.c
> @@ -960,12 +960,13 @@ dpif_netdev_subtable_lookup_set(struct unixctl_conn *conn, int argc OVS_UNUSED,
>      }
>  
>      ovs_mutex_lock(&dp_netdev_mutex);
> +
> +    struct dp_netdev_pmd_thread **pmd_list = NULL;

I think it's more common style to put this at the start of the code
block, so in this case the start of the function.

Other than that (and Ilya's comments) the fixes lgtm.

>      SHASH_FOR_EACH (node, &dp_netdevs) {
>          struct dp_netdev *dp = node->data;
>  
>          /* Get PMD threads list, required to get DPCLS instances. */
>          size_t n;
> -        struct dp_netdev_pmd_thread **pmd_list;
>          sorted_poll_thread_list(dp, &pmd_list, &n);
>  
>          /* take port mutex as HMAP iters over them. */
> @@ -996,6 +997,7 @@ dpif_netdev_subtable_lookup_set(struct unixctl_conn *conn, int argc OVS_UNUSED,
>          ovs_mutex_unlock(&dp->port_mutex);
>      }
>      ovs_mutex_unlock(&dp_netdev_mutex);
> +    free(pmd_list);
>  
>      struct ds reply = DS_EMPTY_INITIALIZER;
>      ds_put_format(&reply,
> @@ -1013,10 +1015,10 @@ dpif_netdev_impl_get(struct unixctl_conn *conn, int argc OVS_UNUSED,
>  {
>      struct ds reply = DS_EMPTY_INITIALIZER;
>      struct shash_node *node;
> +    struct dp_netdev_pmd_thread **pmd_list = NULL;
>  
>      ovs_mutex_lock(&dp_netdev_mutex);
>      SHASH_FOR_EACH (node, &dp_netdevs) {
> -        struct dp_netdev_pmd_thread **pmd_list;
>          struct dp_netdev *dp = node->data;
>          size_t n;
>  
> @@ -1026,6 +1028,8 @@ dpif_netdev_impl_get(struct unixctl_conn *conn, int argc OVS_UNUSED,
>          dp_netdev_impl_get(&reply, pmd_list, n);
>      }
>      ovs_mutex_unlock(&dp_netdev_mutex);
> +    free(pmd_list);
> +
>      unixctl_command_reply(conn, ds_cstr(&reply));
>      ds_destroy(&reply);
>  }
> @@ -1058,12 +1062,12 @@ dpif_netdev_impl_set(struct unixctl_conn *conn, int argc OVS_UNUSED,
>          return;
>      }
>  
> +    struct dp_netdev_pmd_thread **pmd_list = NULL;
>      SHASH_FOR_EACH (node, &dp_netdevs) {
>          struct dp_netdev *dp = node->data;
>  
>          /* Get PMD threads list, required to get DPCLS instances. */
>          size_t n;
> -        struct dp_netdev_pmd_thread **pmd_list;
>          sorted_poll_thread_list(dp, &pmd_list, &n);
>  
>          for (size_t i = 0; i < n; i++) {
> @@ -1080,6 +1084,7 @@ dpif_netdev_impl_set(struct unixctl_conn *conn, int argc OVS_UNUSED,
>          };
>      }
>      ovs_mutex_unlock(&dp_netdev_mutex);
> +    free(pmd_list);
>  
>      /* Reply with success to command. */
>      struct ds reply = DS_EMPTY_INITIALIZER;
> @@ -1099,8 +1104,8 @@ dpif_miniflow_extract_impl_get(struct unixctl_conn *conn, int argc OVS_UNUSED,
>      struct shash_node *node;
>  
>      ovs_mutex_lock(&dp_netdev_mutex);
> +    struct dp_netdev_pmd_thread **pmd_list = NULL;
>      SHASH_FOR_EACH (node, &dp_netdevs) {
> -        struct dp_netdev_pmd_thread **pmd_list;
>          struct dp_netdev *dp = node->data;
>          size_t n;
>  
> @@ -1110,6 +1115,8 @@ dpif_miniflow_extract_impl_get(struct unixctl_conn *conn, int argc OVS_UNUSED,
>          dp_mfex_impl_get(&reply, pmd_list, n);
>      }
>      ovs_mutex_unlock(&dp_netdev_mutex);
> +    free(pmd_list);
> +
>      unixctl_command_reply(conn, ds_cstr(&reply));
>      ds_destroy(&reply);
>  }
> @@ -1243,8 +1250,8 @@ dpif_miniflow_extract_impl_set(struct unixctl_conn *conn, int argc,
>       */
>      ovs_mutex_lock(&dp_netdev_mutex);
>  
> +    struct dp_netdev_pmd_thread **pmd_list = NULL;
>      SHASH_FOR_EACH (node, &dp_netdevs) {
> -        struct dp_netdev_pmd_thread **pmd_list;
>          struct dp_netdev *dp = node->data;
>          size_t n;
>  
> @@ -1269,6 +1276,7 @@ dpif_miniflow_extract_impl_set(struct unixctl_conn *conn, int argc,
>      }
>  
>      ovs_mutex_unlock(&dp_netdev_mutex);
> +    free(pmd_list);
>  
>      /* If PMD thread was specified, but it wasn't found, return error. */
>      if (pmd_thread_to_change != NON_PMD_CORE_ID && !pmd_thread_update_done) {
>
Kevin Traynor Aug. 12, 2021, 4:02 p.m. UTC | #3
On 12/08/2021 16:27, Ilya Maximets wrote:
> On 8/12/21 5:11 PM, Harry van Haaren wrote:
>> This commit fixes a memory leak when a pmd_list is retrieved
>> from the sorted_poll_thread_list() function. Inside the function,
>> the pmd list is allocated, but it was not freed once no longer
>> required for the command functionality. These leaks were found
>> by a static analysis tool.
> 
> Hmm.  Thanks for spotting this!
> 
> Could you, please, add unit tests for these functions?  Something very
> basic will work.  Even if these functions will not do anything useful,
> e.g. changing priority of the only implementation, asan should spot the
> leak and fail the CI job that we have.
> 
>>
>> Fixes: 3d8f47bc04 ("dpif-netdev: Add command line and function pointer for miniflow extract")
>> Fixes: abb807e27d ("dpif-netdev: Add command to switch dpif implementation.")
>> Fixes: 3d018c3ea7 ("dpif-netdev: Add subtable lookup prio set command.")
> 
> Please, split the change for the last one to the separate patch, so
> it can be backported to earlier branches.
> 
>>
>> Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com>
>>
>> ---
>>
>>  lib/dpif-netdev.c | 18 +++++++++++++-----
>>  1 file changed, 13 insertions(+), 5 deletions(-)
> 
> The patch name should have a 'dpif-netdev' area prefix.
> 
>>
>> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
>> index 03f460c7d1..99779bb402 100644
>> --- a/lib/dpif-netdev.c
>> +++ b/lib/dpif-netdev.c
>> @@ -960,12 +960,13 @@ dpif_netdev_subtable_lookup_set(struct unixctl_conn *conn, int argc OVS_UNUSED,
>>      }
>>  
>>      ovs_mutex_lock(&dp_netdev_mutex);
>> +
>> +    struct dp_netdev_pmd_thread **pmd_list = NULL;
>>      SHASH_FOR_EACH (node, &dp_netdevs) {
>>          struct dp_netdev *dp = node->data;
>>  
>>          /* Get PMD threads list, required to get DPCLS instances. */
>>          size_t n;
>> -        struct dp_netdev_pmd_thread **pmd_list;
>>          sorted_poll_thread_list(dp, &pmd_list, &n);
> 
> This is called in a loop, so we're leaking the array on every
> iteration.  Same for other parts of the patch.
> 

Ah yes, ignore my other mail.

>>  
>>          /* take port mutex as HMAP iters over them. */
>> @@ -996,6 +997,7 @@ dpif_netdev_subtable_lookup_set(struct unixctl_conn *conn, int argc OVS_UNUSED,
>>          ovs_mutex_unlock(&dp->port_mutex);
>>      }
>>      ovs_mutex_unlock(&dp_netdev_mutex);
>> +    free(pmd_list);
>>  
>>      struct ds reply = DS_EMPTY_INITIALIZER;
>>      ds_put_format(&reply,
>> @@ -1013,10 +1015,10 @@ dpif_netdev_impl_get(struct unixctl_conn *conn, int argc OVS_UNUSED,
>>  {
>>      struct ds reply = DS_EMPTY_INITIALIZER;
>>      struct shash_node *node;
>> +    struct dp_netdev_pmd_thread **pmd_list = NULL;
>>  
>>      ovs_mutex_lock(&dp_netdev_mutex);
>>      SHASH_FOR_EACH (node, &dp_netdevs) {
>> -        struct dp_netdev_pmd_thread **pmd_list;
>>          struct dp_netdev *dp = node->data;
>>          size_t n;
>>  
>> @@ -1026,6 +1028,8 @@ dpif_netdev_impl_get(struct unixctl_conn *conn, int argc OVS_UNUSED,
>>          dp_netdev_impl_get(&reply, pmd_list, n);
>>      }
>>      ovs_mutex_unlock(&dp_netdev_mutex);
>> +    free(pmd_list);
>> +
>>      unixctl_command_reply(conn, ds_cstr(&reply));
>>      ds_destroy(&reply);
>>  }
>> @@ -1058,12 +1062,12 @@ dpif_netdev_impl_set(struct unixctl_conn *conn, int argc OVS_UNUSED,
>>          return;
>>      }
>>  
>> +    struct dp_netdev_pmd_thread **pmd_list = NULL;
>>      SHASH_FOR_EACH (node, &dp_netdevs) {
>>          struct dp_netdev *dp = node->data;
>>  
>>          /* Get PMD threads list, required to get DPCLS instances. */
>>          size_t n;
>> -        struct dp_netdev_pmd_thread **pmd_list;
>>          sorted_poll_thread_list(dp, &pmd_list, &n);
>>  
>>          for (size_t i = 0; i < n; i++) {
>> @@ -1080,6 +1084,7 @@ dpif_netdev_impl_set(struct unixctl_conn *conn, int argc OVS_UNUSED,
>>          };
>>      }
>>      ovs_mutex_unlock(&dp_netdev_mutex);
>> +    free(pmd_list);
>>  
>>      /* Reply with success to command. */
>>      struct ds reply = DS_EMPTY_INITIALIZER;
>> @@ -1099,8 +1104,8 @@ dpif_miniflow_extract_impl_get(struct unixctl_conn *conn, int argc OVS_UNUSED,
>>      struct shash_node *node;
>>  
>>      ovs_mutex_lock(&dp_netdev_mutex);
>> +    struct dp_netdev_pmd_thread **pmd_list = NULL;
>>      SHASH_FOR_EACH (node, &dp_netdevs) {
>> -        struct dp_netdev_pmd_thread **pmd_list;
>>          struct dp_netdev *dp = node->data;
>>          size_t n;
>>  
>> @@ -1110,6 +1115,8 @@ dpif_miniflow_extract_impl_get(struct unixctl_conn *conn, int argc OVS_UNUSED,
>>          dp_mfex_impl_get(&reply, pmd_list, n);
>>      }
>>      ovs_mutex_unlock(&dp_netdev_mutex);
>> +    free(pmd_list);
>> +
>>      unixctl_command_reply(conn, ds_cstr(&reply));
>>      ds_destroy(&reply);
>>  }
>> @@ -1243,8 +1250,8 @@ dpif_miniflow_extract_impl_set(struct unixctl_conn *conn, int argc,
>>       */
>>      ovs_mutex_lock(&dp_netdev_mutex);
>>  
>> +    struct dp_netdev_pmd_thread **pmd_list = NULL;
>>      SHASH_FOR_EACH (node, &dp_netdevs) {
>> -        struct dp_netdev_pmd_thread **pmd_list;
>>          struct dp_netdev *dp = node->data;
>>          size_t n;
>>  
>> @@ -1269,6 +1276,7 @@ dpif_miniflow_extract_impl_set(struct unixctl_conn *conn, int argc,
>>      }
>>  
>>      ovs_mutex_unlock(&dp_netdev_mutex);
>> +    free(pmd_list);
>>  
>>      /* If PMD thread was specified, but it wasn't found, return error. */
>>      if (pmd_thread_to_change != NON_PMD_CORE_ID && !pmd_thread_update_done) {
>>
Van Haaren, Harry Aug. 12, 2021, 4:04 p.m. UTC | #4
> -----Original Message-----
> From: Ilya Maximets <i.maximets@ovn.org>
> Sent: Thursday, August 12, 2021 4:27 PM
> To: Van Haaren, Harry <harry.van.haaren@intel.com>; dev@openvswitch.org
> Cc: i.maximets@ovn.org; amber.kumar@intel.com; Stokes, Ian
> <ian.stokes@intel.com>; Flavio Leitner <fbl@sysclose.org>
> Subject: Re: [ovs-dev] [PATCH] dpif: fix memory leak of pmd_list after usage
> 
> On 8/12/21 5:11 PM, Harry van Haaren wrote:
> > This commit fixes a memory leak when a pmd_list is retrieved
> > from the sorted_poll_thread_list() function. Inside the function,
> > the pmd list is allocated, but it was not freed once no longer
> > required for the command functionality. These leaks were found
> > by a static analysis tool.
> 
> Hmm.  Thanks for spotting this!

Automated tooling is great to have.

> Could you, please, add unit tests for these functions?  Something very
> basic will work.  Even if these functions will not do anything useful,
> e.g. changing priority of the only implementation, asan should spot the
> leak and fail the CI job that we have.

Yes will look to add some tests in OVS 2.17 timeframe if that helps.


> > Fixes: 3d8f47bc04 ("dpif-netdev: Add command line and function pointer for
> miniflow extract")
> > Fixes: abb807e27d ("dpif-netdev: Add command to switch dpif implementation.")
> > Fixes: 3d018c3ea7 ("dpif-netdev: Add subtable lookup prio set command.")
> 
> Please, split the change for the last one to the separate patch, so
> it can be backported to earlier branches.

Yes, patch split done in v2; https://patchwork.ozlabs.org/project/openvswitch/list/?series=257839


> > Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com>
> >
> > ---
> >
> >  lib/dpif-netdev.c | 18 +++++++++++++-----
> >  1 file changed, 13 insertions(+), 5 deletions(-)
> 
> The patch name should have a 'dpif-netdev' area prefix.

Sure, fixed.


> >          /* Get PMD threads list, required to get DPCLS instances. */
> >          size_t n;
> > -        struct dp_netdev_pmd_thread **pmd_list;
> >          sorted_poll_thread_list(dp, &pmd_list, &n);
> 
> This is called in a loop, so we're leaking the array on every
> iteration.  Same for other parts of the patch.

Doh. This is the result of rushing a fix I suppose. Thanks for pointing
out, and indeed for the other occurrences too. Fixed in V2.

<snip patch>

Kevin Traynor: you mentioned in the other email a better way
to handle the below variables. They're no longer part of the V2 patchset,
but I've noted your review of V1, thanks.

> > +    struct dp_netdev_pmd_thread **pmd_list = NULL;

Regards, -Harry

Patch

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 03f460c7d1..99779bb402 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -960,12 +960,13 @@  dpif_netdev_subtable_lookup_set(struct unixctl_conn *conn, int argc OVS_UNUSED,
     }
 
     ovs_mutex_lock(&dp_netdev_mutex);
+
+    struct dp_netdev_pmd_thread **pmd_list = NULL;
     SHASH_FOR_EACH (node, &dp_netdevs) {
         struct dp_netdev *dp = node->data;
 
         /* Get PMD threads list, required to get DPCLS instances. */
         size_t n;
-        struct dp_netdev_pmd_thread **pmd_list;
         sorted_poll_thread_list(dp, &pmd_list, &n);
 
         /* take port mutex as HMAP iters over them. */
@@ -996,6 +997,7 @@  dpif_netdev_subtable_lookup_set(struct unixctl_conn *conn, int argc OVS_UNUSED,
         ovs_mutex_unlock(&dp->port_mutex);
     }
     ovs_mutex_unlock(&dp_netdev_mutex);
+    free(pmd_list);
 
     struct ds reply = DS_EMPTY_INITIALIZER;
     ds_put_format(&reply,
@@ -1013,10 +1015,10 @@  dpif_netdev_impl_get(struct unixctl_conn *conn, int argc OVS_UNUSED,
 {
     struct ds reply = DS_EMPTY_INITIALIZER;
     struct shash_node *node;
+    struct dp_netdev_pmd_thread **pmd_list = NULL;
 
     ovs_mutex_lock(&dp_netdev_mutex);
     SHASH_FOR_EACH (node, &dp_netdevs) {
-        struct dp_netdev_pmd_thread **pmd_list;
         struct dp_netdev *dp = node->data;
         size_t n;
 
@@ -1026,6 +1028,8 @@  dpif_netdev_impl_get(struct unixctl_conn *conn, int argc OVS_UNUSED,
         dp_netdev_impl_get(&reply, pmd_list, n);
     }
     ovs_mutex_unlock(&dp_netdev_mutex);
+    free(pmd_list);
+
     unixctl_command_reply(conn, ds_cstr(&reply));
     ds_destroy(&reply);
 }
@@ -1058,12 +1062,12 @@  dpif_netdev_impl_set(struct unixctl_conn *conn, int argc OVS_UNUSED,
         return;
     }
 
+    struct dp_netdev_pmd_thread **pmd_list = NULL;
     SHASH_FOR_EACH (node, &dp_netdevs) {
         struct dp_netdev *dp = node->data;
 
         /* Get PMD threads list, required to get DPCLS instances. */
         size_t n;
-        struct dp_netdev_pmd_thread **pmd_list;
         sorted_poll_thread_list(dp, &pmd_list, &n);
 
         for (size_t i = 0; i < n; i++) {
@@ -1080,6 +1084,7 @@  dpif_netdev_impl_set(struct unixctl_conn *conn, int argc OVS_UNUSED,
         };
     }
     ovs_mutex_unlock(&dp_netdev_mutex);
+    free(pmd_list);
 
     /* Reply with success to command. */
     struct ds reply = DS_EMPTY_INITIALIZER;
@@ -1099,8 +1104,8 @@  dpif_miniflow_extract_impl_get(struct unixctl_conn *conn, int argc OVS_UNUSED,
     struct shash_node *node;
 
     ovs_mutex_lock(&dp_netdev_mutex);
+    struct dp_netdev_pmd_thread **pmd_list = NULL;
     SHASH_FOR_EACH (node, &dp_netdevs) {
-        struct dp_netdev_pmd_thread **pmd_list;
         struct dp_netdev *dp = node->data;
         size_t n;
 
@@ -1110,6 +1115,8 @@  dpif_miniflow_extract_impl_get(struct unixctl_conn *conn, int argc OVS_UNUSED,
         dp_mfex_impl_get(&reply, pmd_list, n);
     }
     ovs_mutex_unlock(&dp_netdev_mutex);
+    free(pmd_list);
+
     unixctl_command_reply(conn, ds_cstr(&reply));
     ds_destroy(&reply);
 }
@@ -1243,8 +1250,8 @@  dpif_miniflow_extract_impl_set(struct unixctl_conn *conn, int argc,
      */
     ovs_mutex_lock(&dp_netdev_mutex);
 
+    struct dp_netdev_pmd_thread **pmd_list = NULL;
     SHASH_FOR_EACH (node, &dp_netdevs) {
-        struct dp_netdev_pmd_thread **pmd_list;
         struct dp_netdev *dp = node->data;
         size_t n;
 
@@ -1269,6 +1276,7 @@  dpif_miniflow_extract_impl_set(struct unixctl_conn *conn, int argc,
     }
 
     ovs_mutex_unlock(&dp_netdev_mutex);
+    free(pmd_list);
 
     /* If PMD thread was specified, but it wasn't found, return error. */
     if (pmd_thread_to_change != NON_PMD_CORE_ID && !pmd_thread_update_done) {