[ovs-dev,10/16] tests: Improve "Load balancer health check and Service Monitor sync".

Message ID 20201030002447.936548-10-blp@ovn.org
State Accepted
Series [ovs-dev,01/16] tests: Drop support for glibc before version 2.11.

Commit Message

Ben Pfaff Oct. 30, 2020, 12:24 a.m. UTC
This test was hard to debug when it failed, because it didn't output
the full flow table.  This commit improves on that by capturing the
complete "ovn-sbctl dump-flows" output for each check.

This commit also removes the expected output's dependence on southbound
flow table numbers, and adds error checks to many commands that lacked
them.

Signed-off-by: Ben Pfaff <blp@ovn.org>
---
 tests/ovn-northd.at | 142 ++++++++++++++++++++++++--------------------
 1 file changed, 79 insertions(+), 63 deletions(-)
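
Notes for reviewers: each flow check in the test is rewritten with the
same pattern.  A minimal before/after sketch (the capture file names,
e.g. "sbflows", are just the ones this patch uses; the match and action
text is elided here):

    # Before: the expected output hard-codes the logical flow table
    # number, so any pipeline change that renumbers tables breaks the
    # test, and only the grepped lines survive for debugging.
    ovn-sbctl dump-flows sw0 | grep ct_lb | grep priority=120 > lflows.txt
    AT_CHECK([cat lflows.txt], [0], [dnl
      table=11(ls_in_stateful     ), priority=120  , match=(...), action=(...)
    ])

    # After: sed strips the two-digit table number from the comparison,
    # "tee sbflows" saves the full dump, AT_CAPTURE_FILE attaches that
    # file to the log when the test fails, and OVS_WAIT_FOR_OUTPUT
    # retries until northd catches up rather than sampling once.
    AT_CAPTURE_FILE([sbflows])
    OVS_WAIT_FOR_OUTPUT(
      [ovn-sbctl dump-flows sw0 | tee sbflows | grep 'priority=120.*ct_lb' | sed 's/table=..//'], [0], [dnl
      (ls_in_stateful     ), priority=120  , match=(...), action=(...)
    ])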

Patch

diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
index 90f3c8f19241..c94a175dfb3d 100644
--- a/tests/ovn-northd.at
+++ b/tests/ovn-northd.at
@@ -1065,16 +1065,17 @@  AT_SETUP([ovn -- check Load balancer health check and Service Monitor sync])
 AT_SKIP_IF([test $HAVE_PYTHON = no])
 ovn_start
 
-ovn-nbctl lb-add lb1 10.0.0.10:80 10.0.0.3:80,20.0.0.3:80
+check ovn-nbctl lb-add lb1 10.0.0.10:80 10.0.0.3:80,20.0.0.3:80
 
-ovn-nbctl --wait=sb set load_balancer . ip_port_mappings:10.0.0.3=sw0-p1
-ovn-nbctl --wait=sb set load_balancer . ip_port_mappings:20.0.0.3=sw1-p1
+check ovn-nbctl --wait=sb set load_balancer . ip_port_mappings:10.0.0.3=sw0-p1
+check ovn-nbctl --wait=sb set load_balancer . ip_port_mappings:20.0.0.3=sw1-p1
 
 wait_row_count Service_Monitor 0
 
-ovn-nbctl --wait=sb -- --id=@hc create \
+AT_CHECK([ovn-nbctl --wait=sb -- --id=@hc create \
 Load_Balancer_Health_Check vip="10.0.0.10\:80" -- add Load_Balancer . \
-health_check @hc
+health_check @hc | uuidfilt], [0], [<0>
+])
 
 wait_row_count Service_Monitor 0
 
@@ -1097,46 +1098,53 @@  wait_row_count Service_Monitor 1
 ovn-nbctl --wait=sb set load_balancer . ip_port_mappings:20.0.0.3=sw1-p1:20.0.0.2
 wait_row_count Service_Monitor 2
 
-ovn-nbctl --wait=sb ls-lb-add sw0 lb1
+check ovn-nbctl --wait=sb ls-lb-add sw0 lb1
 
-ovn-sbctl dump-flows sw0 | grep ct_lb | grep priority=120 > lflows.txt
-AT_CHECK([cat lflows.txt], [0], [dnl
-  table=11(ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
+AT_CAPTURE_FILE([sbflows])
+OVS_WAIT_FOR_OUTPUT(
+  [ovn-sbctl dump-flows sw0 | tee sbflows | grep 'priority=120.*ct_lb' | sed 's/table=..//'], 0, [dnl
+  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
 ])
 
 # Delete the Load_Balancer_Health_Check
 ovn-nbctl --wait=sb clear load_balancer . health_check
 wait_row_count Service_Monitor 0
 
-ovn-sbctl dump-flows sw0 | grep ct_lb | grep priority=120 > lflows.txt
-AT_CHECK([cat lflows.txt], [0], [dnl
-  table=11(ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
+AT_CAPTURE_FILE([sbflows2])
+OVS_WAIT_FOR_OUTPUT(
+  [ovn-sbctl dump-flows sw0 | tee sbflows2 | grep 'priority=120.*ct_lb' | sed 's/table=..//'], [0],
+[  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
 ])
 
 # Create the Load_Balancer_Health_Check again.
 ovn-nbctl --wait=sb -- --id=@hc create \
 Load_Balancer_Health_Check vip="10.0.0.10\:80" -- add Load_Balancer . \
 health_check @hc
-
 wait_row_count Service_Monitor 2
 
 ovn-sbctl dump-flows sw0 | grep ct_lb | grep priority=120 > lflows.txt
-AT_CHECK([cat lflows.txt], [0], [dnl
-  table=11(ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
+AT_CHECK([cat lflows.txt | sed 's/table=..//'], [0], [dnl
+  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
 ])
 
 # Get the uuid of both the service_monitor
-sm_sw0_p1=`ovn-sbctl --bare --columns _uuid find service_monitor logical_port=sw0-p1`
-sm_sw1_p1=`ovn-sbctl --bare --columns _uuid find service_monitor logical_port=sw1-p1`
+sm_sw0_p1=$(fetch_column Service_Monitor _uuid logical_port=sw0-p1)
+sm_sw1_p1=$(fetch_column Service_Monitor _uuid logical_port=sw1-p1)
 
-# Set the service monitor for sw1-p1 to offline
-ovn-sbctl set service_monitor $sm_sw1_p1 status=offline
+AT_CAPTURE_FILE([sbflows3])
+OVS_WAIT_FOR_OUTPUT(
+  [ovn-sbctl dump-flows sw0 | tee sbflows3 | grep 'priority=120.*ct_lb' | sed 's/table=..//'], [0],
+[  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
+])
 
+# Set the service monitor for sw1-p1 to offline
+check ovn-sbctl set service_monitor $sm_sw1_p1 status=offline
 wait_row_count Service_Monitor 1 logical_port=sw1-p1 status=offline
 
-ovn-sbctl dump-flows sw0 | grep ct_lb | grep priority=120 > lflows.txt
-AT_CHECK([cat lflows.txt], [0], [dnl
-  table=11(ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80);)
+AT_CAPTURE_FILE([sbflows4])
+OVS_WAIT_FOR_OUTPUT(
+  [ovn-sbctl dump-flows sw0 | tee sbflows4 | grep 'priority=120.*ct_lb' | sed 's/table=..//'], [0],
+[  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80);)
 ])
 
 # Set the service monitor for sw0-p1 to offline
@@ -1144,14 +1152,14 @@  ovn-sbctl set service_monitor $sm_sw0_p1 status=offline
 
 wait_row_count Service_Monitor 1 logical_port=sw0-p1 status=offline
 
-ovn-sbctl dump-flows sw0 | grep ct_lb | grep priority=120 > lflows.txt
-AT_CHECK([cat lflows.txt], [0], [dnl
-])
+AT_CAPTURE_FILE([sbflows5])
+OVS_WAIT_FOR_OUTPUT(
+  [ovn-sbctl dump-flows sw0 | tee sbflows5 | grep 'priority=120.*ct_lb'], 1)
 
-ovn-sbctl dump-flows sw0 | grep "ip4.dst == 10.0.0.10 && tcp.dst == 80" \
-| grep priority=120 > lflows.txt
-AT_CHECK([cat lflows.txt], [0], [dnl
-  table=11(ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(drop;)
+AT_CAPTURE_FILE([sbflows6])
+OVS_WAIT_FOR_OUTPUT(
+  [ovn-sbctl dump-flows sw0 | tee sbflows6 | grep "ip4.dst == 10.0.0.10 && tcp.dst == 80" | grep priority=120 | sed 's/table=..//'], [0], [dnl
+  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(drop;)
 ])
 
 # Set the service monitor for sw0-p1 and sw1-p1 to online
@@ -1160,9 +1168,10 @@  ovn-sbctl set service_monitor $sm_sw1_p1 status=online
 
 wait_row_count Service_Monitor 1 logical_port=sw1-p1 status=online
 
-ovn-sbctl dump-flows sw0 | grep ct_lb | grep priority=120 > lflows.txt
-AT_CHECK([cat lflows.txt], [0], [dnl
-  table=11(ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
+AT_CAPTURE_FILE([sbflows7])
+OVS_WAIT_FOR_OUTPUT(
+  [ovn-sbctl dump-flows sw0 | tee sbflows7 | grep ct_lb | grep priority=120 | sed 's/table=..//'], 0,
+[  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
 ])
 
 # Set the service monitor for sw1-p1 to error
@@ -1171,18 +1180,19 @@  wait_row_count Service_Monitor 1 logical_port=sw1-p1 status=error
 
 ovn-sbctl dump-flows sw0 | grep "ip4.dst == 10.0.0.10 && tcp.dst == 80" \
 | grep priority=120 > lflows.txt
-AT_CHECK([cat lflows.txt], [0], [dnl
-  table=11(ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80);)
+AT_CHECK([cat lflows.txt | sed 's/table=..//'], [0], [dnl
+  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80);)
 ])
 
 # Add one more vip to lb1
-
-ovn-nbctl set load_balancer . vip:"10.0.0.40\:1000"="10.0.0.3:1000,20.0.0.3:80"
+check ovn-nbctl set load_balancer . vip:10.0.0.40\\:1000=10.0.0.3:1000,20.0.0.3:80
 
 # create health_check for new vip - 10.0.0.40
-ovn-nbctl --wait=sb -- --id=@hc create \
-Load_Balancer_Health_Check vip="10.0.0.40\:1000" -- add Load_Balancer . \
-health_check @hc
+AT_CHECK(
+  [ovn-nbctl --wait=sb \
+          -- --id=@hc create Load_Balancer_Health_Check vip=10.0.0.40\\:1000 \
+          -- add Load_Balancer . health_check @hc | uuidfilt], [0], [<0>
+])
 
 # There should be totally 3 rows in service_monitor for -
 #    * 10.0.0.3:80
@@ -1193,52 +1203,58 @@  wait_row_count Service_Monitor 3
 wait_row_count Service_Monitor 2 logical_port=sw0-p1
 wait_row_count Service_Monitor 1 port=1000
 
-ovn-sbctl dump-flows sw0 | grep ct_lb | grep priority=120 > lflows.txt
-AT_CHECK([cat lflows.txt], [0], [dnl
-  table=11(ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80);)
-  table=11(ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.40 && tcp.dst == 1000), action=(ct_lb(backends=10.0.0.3:1000);)
+AT_CAPTURE_FILE([sbflows9])
+OVS_WAIT_FOR_OUTPUT(
+  [ovn-sbctl dump-flows sw0 | tee sbflows9 | grep ct_lb | grep priority=120 | sed 's/table=..//'],
+  0,
+[  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80);)
+  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.40 && tcp.dst == 1000), action=(ct_lb(backends=10.0.0.3:1000);)
 ])
 
 # Set the service monitor for sw1-p1 to online
-ovn-sbctl set service_monitor $sm_sw1_p1 status=online
+check ovn-sbctl set service_monitor $sm_sw1_p1 status=online
 
 wait_row_count Service_Monitor 1 logical_port=sw1-p1 status=online
 
-ovn-sbctl dump-flows sw0 | grep ct_lb | grep priority=120 > lflows.txt
-AT_CHECK([cat lflows.txt], [0], [dnl
-  table=11(ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
-  table=11(ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.40 && tcp.dst == 1000), action=(ct_lb(backends=10.0.0.3:1000,20.0.0.3:80);)
+AT_CAPTURE_FILE([sbflows10])
+OVS_WAIT_FOR_OUTPUT(
+  [ovn-sbctl dump-flows sw0 | tee sbflows10 | grep ct_lb | grep priority=120 | sed 's/table=..//'],
+  0,
+[  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
+  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.40 && tcp.dst == 1000), action=(ct_lb(backends=10.0.0.3:1000,20.0.0.3:80);)
 ])
 
 # Associate lb1 to sw1
-ovn-nbctl --wait=sb ls-lb-add sw1 lb1
-ovn-sbctl dump-flows sw1 | grep ct_lb | grep priority=120 > lflows.txt
-AT_CHECK([cat lflows.txt], [0], [dnl
-  table=11(ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
-  table=11(ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.40 && tcp.dst == 1000), action=(ct_lb(backends=10.0.0.3:1000,20.0.0.3:80);)
+check ovn-nbctl --wait=sb ls-lb-add sw1 lb1
+AT_CAPTURE_FILE([sbflows11])
+OVS_WAIT_FOR_OUTPUT(
+  [ovn-sbctl dump-flows sw1 | tee sbflows11 | grep ct_lb | grep priority=120 | sed 's/table=..//'],
+  0, [dnl
+  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
+  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.40 && tcp.dst == 1000), action=(ct_lb(backends=10.0.0.3:1000,20.0.0.3:80);)
 ])
 
 # Now create lb2 same as lb1 but udp protocol.
-ovn-nbctl lb-add lb2 10.0.0.10:80 10.0.0.3:80,20.0.0.3:80 udp
-lb2_uuid=`ovn-nbctl lb-list | grep udp | awk '{print $1}'`
-ovn-nbctl --wait=sb set load_balancer $lb2_uuid ip_port_mappings:10.0.0.3=sw0-p1:10.0.0.2
-ovn-nbctl --wait=sb set load_balancer $lb2_uuid ip_port_mappings:20.0.0.3=sw1-p1:20.0.0.2
+check ovn-nbctl lb-add lb2 10.0.0.10:80 10.0.0.3:80,20.0.0.3:80 udp
+check ovn-nbctl --wait=sb set load_balancer lb2 ip_port_mappings:10.0.0.3=sw0-p1:10.0.0.2
+check ovn-nbctl --wait=sb set load_balancer lb2 ip_port_mappings:20.0.0.3=sw1-p1:20.0.0.2
 
-ovn-nbctl -- --id=@hc create Load_Balancer_Health_Check vip="10.0.0.10\:80" -- add Load_Balancer $lb2_uuid health_check @hc
+AT_CHECK([ovn-nbctl -- --id=@hc create Load_Balancer_Health_Check vip="10.0.0.10\:80" -- add Load_Balancer lb2 health_check @hc | uuidfilt],
+         [0], [<0>
+])
 
-ovn-nbctl ls-lb-add sw0 lb2
-ovn-nbctl ls-lb-add sw1 lb2
-ovn-nbctl lr-lb-add lr0 lb2
+check ovn-nbctl ls-lb-add sw0 lb2
+check ovn-nbctl ls-lb-add sw1 lb2
 
 wait_row_count Service_Monitor 5
 
 # Change the svc_monitor_mac. This should get reflected in service_monitor table rows.
-ovn-nbctl set NB_Global . options:svc_monitor_mac="fe:a0:65:a2:01:03"
+check ovn-nbctl set NB_Global . options:svc_monitor_mac="fe:a0:65:a2:01:03"
 
 wait_row_count Service_Monitor 5 src_mac='"fe:a0:65:a2:01:03"'
 
 # Change the source ip for 10.0.0.3 backend ip in lb2
-ovn-nbctl --wait=sb set load_balancer $lb2_uuid ip_port_mappings:10.0.0.3=sw0-p1:10.0.0.100
+check ovn-nbctl --wait=sb set load_balancer lb2 ip_port_mappings:10.0.0.3=sw0-p1:10.0.0.100
 
 wait_row_count Service_Monitor 1 logical_port=sw0-p1 src_ip=10.0.0.100
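
(A note on the "check" prefix added throughout: it is the helper from
ovn-macros.at.  Conceptually, and assuming the helper's usual
definition, a line such as

    check ovn-nbctl --wait=sb ls-lb-add sw0 lb1

behaves roughly like

    AT_CHECK([ovn-nbctl --wait=sb ls-lb-add sw0 lb1])

so a setup command that exits nonzero fails the test on the spot,
instead of surfacing later as a confusing flow-table mismatch.)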