@@ -9,13 +9,29 @@
# test_group <testsuite-name>
#
# Define a test suite. This function should be called before calling any
-# other functions
+# other functions.
+#
+# test_setup <shell-cmd>
+#
+# Register a shell command to be executed before running the tests.
+# This can be called multiple times; the commands are run in the
+# order they were registered.
+#
+# test_cleanup <shell-cmd>
+#
+# Register a shell command to be executed after running the tests.
+# This can be called multiple times; all registered commands are run.
#
# test_result <rc> <--|output>
#
# Define the exit code and the output for the test. If there is no output
# from the commands, then '--' can be specified to denote empty output.
-# Multi-line output can be added using a here document.
+# Multi-line output can be added using a here document. Only the
+# output on stdout is matched against this value.
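+#
+# For example (some_command is a placeholder):
+#
+# test_result 0 <<EOF
+# line one
+# line two
+# EOF
+# test_run some_command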
+#
+# test_result_stderr <output>
+#
+# Define the expected output on stderr for the test. If there is no
+# stderr output from the commands, or if matching the output on
+# stderr is not required, then this call can be omitted. Multi-line
+# output can be specified using a here document.
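+#
+# For example (matching two lines of stderr from a placeholder
+# failing command):
+#
+# test_result 1 --
+# test_result_stderr <<EOF
+# error line one
+# error line two
+# EOF
+# test_run some_failing_command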
#
# test_run <command> [<arguments>]
#
@@ -26,15 +42,31 @@
# This can be called before test_run to skip the test. This is useful
# for writing tests that depend on the environment (e.g. architecture).
#
+# test_wrapper
+#
+# To execute commands in a special context, a test_wrapper function
+# can be defined. This function is passed all the arguments given to
+# the test_run command.
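+#
+# For example (a hypothetical wrapper running every command through a
+# helper script):
+#
+# test_wrapper ()
+# {
+#     ./run-in-sandbox "$@"
+# }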
+#
+#
+# Matching output:
+#
+# To match varying output, define a result_filter() function to
+# filter the output before it is compared. This allows matching
+# varying fields such as dates or timestamps.
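+#
+# For example (mapping HH:MM:SS timestamps to a fixed token):
+#
+# result_filter ()
+# {
+#     sed -e 's#[0-9][0-9]:[0-9][0-9]:[0-9][0-9]#TIMESTAMP#g'
+# }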
+#
#
# Example:
#
+# test_setup "touch somefile"
+# test_cleanup "rm -f somefile"
+#
# test_group "my group of tests"
#
# test_result 0 output1
# test_run command1 arguments1
#
# test_result 1 --
+# test_result_stderr stderr2
# test_run command2 arguments2
#
# test_result 0 output3
@@ -71,6 +103,7 @@ fi
test_started=0
test_skipped=0
test_defined=0
+test_stderr=0
count_total=0
count_skipped=0
@@ -78,17 +111,26 @@ count_failed=0
trap 'test_end' 0
-test_group ()
+test_setup_hooks=""
+
+test_setup ()
{
- test_name=${1:-$test_name}
- test_started=1
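+ # Setup hooks are chained with '&&' so a failing hook stops the rest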
+ test_setup_hooks="${test_setup_hooks}${test_setup_hooks:+ && }$*"
+}
- echo "-- $test_name"
+test_cleanup_hooks=""
+
+test_cleanup ()
+{
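+ # Cleanup hooks are chained with ';' so every hook runs even if one fails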
+ test_cleanup_hooks="${test_cleanup_hooks}${test_cleanup_hooks:+ ; }$*"
}
test_end ()
{
trap 0
+
+ eval $test_cleanup_hooks
+
if [ $count_total -eq 0 ] ; then
status=99
elif [ $count_failed -gt 0 ] ; then
@@ -104,10 +146,18 @@ test_end ()
test_error ()
{
+ trap 0
echo "$@"
exit 99
}
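+
+# Exit code 77 is interpreted as SKIP by the automake test harness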
+test_error_skip ()
+{
+ trap 0
+ echo "$@"
+ exit 77
+}
+
test_log ()
{
if [ -z "$test_logfile" ] ; then
@@ -167,10 +217,48 @@ test_output ()
fi
}
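+
+# The *_default functions below preserve the stock behaviour;
+# individual tests may redefine test_wrapper and result_filter to
+# override them.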
+test_wrapper_default ()
+{
+ "$@"
+}
+
+test_wrapper ()
+{
+ test_wrapper_default "$@"
+}
+
+result_filter_default ()
+{
+ cat
+}
+
+result_filter ()
+{
+ result_filter_default
+}
+
#---------------------------------------------------------------------
# Public functions
#---------------------------------------------------------------------
+test_group ()
+{
+ test_name=${1:-$test_name}
+ test_started=1
+
+ echo "-- $test_name"
+
+ eval $test_setup_hooks
+ rc=$?
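+ # A setup hook exiting with status 77 requests that the tests be skipped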
+ if [ $rc -ne 0 ] ; then
+ if [ $rc -eq 77 ] ; then
+ test_error_skip "setup failed, skipping"
+ else
+ test_error "ERROR: setup failed"
+ fi
+ fi
+}
+
test_result ()
{
if [ $test_started -eq 0 ] ; then
@@ -195,6 +283,28 @@ test_result ()
test_defined=1
}
+test_result_stderr ()
+{
+ if [ $test_started -eq 0 ] ; then
+ test_error "ERROR: missing call to test_group"
+ fi
+ if [ $test_defined -eq 0 ] ; then
+ test_error "ERROR: missing call to test_result"
+ fi
+
+ if [ $# -eq 1 ] ; then
+ required_output_stderr="$1"
+ else
+ if ! tty -s ; then
+ required_output_stderr=$(cat)
+ else
+ required_output_stderr=""
+ fi
+ fi
+
+ test_stderr=1
+}
+
test_skip ()
{
if [ $test_started -eq 0 ] ; then
@@ -225,7 +335,9 @@ test_run ()
return
fi
- output=$("$@" 2>&1)
+ stderr_file=$(mktemp)
+
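+ # Run the command via the wrapper; stderr goes to a temp file so it
+ # can be matched separately from stdout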
+ output_raw=$(test_wrapper "$@" 2>"$stderr_file")
rc=$?
if [ $rc -ne $required_rc ] ; then
@@ -233,15 +345,39 @@ test_run ()
test_log "output rc: $rc"
fi
+ output=$(printf '%s\n' "$output_raw" | result_filter)
if [ "$output" != "$required_output" ] ; then
test_log "expected:"
test_log "$required_output"
test_log "output:"
test_log "$output"
+ if [ "$output_raw" != "$output" ] ; then
+ test_log "output raw:"
+ test_log "$output_raw"
+ fi
output_mismatch=1
fi
+ if [ $test_stderr -eq 1 ] ; then
+ output_stderr_raw=$(cat "$stderr_file")
+ output_stderr=$(result_filter <"$stderr_file")
+ if [ "$output_stderr" != "$required_output_stderr" ] ; then
+ test_log "expected stderr:"
+ test_log "$required_output_stderr"
+ test_log "output stderr:"
+ test_log "$output_stderr"
+ if [ "$output_stderr_raw" != "$output_stderr" ] ; then
+ test_log "output stderr raw:"
+ test_log "$output_stderr_raw"
+ fi
+ output_mismatch=1
+ fi
+ fi
+
+ rm -f "$stderr_file"
+
test_output $rc $required_rc $output_mismatch
test_skipped=0
test_defined=0
+ test_stderr=0
}
@@ -6,8 +6,16 @@ TEST_TRS=""
. $(dirname "$0")/driver.sh
+test_setup "echo test start"
+test_cleanup "echo test end"
+
+test_setup "echo real start"
+test_cleanup "echo real end"
+
test_group "test driver tests"
+echo
+
echo "test should PASS"
test_result 0 foo
test_run echo foo
@@ -36,3 +44,41 @@ echo "test should SKIP"
test_result 0 --
test_skip
test_run echo foo
+
+echo
+
+echo_stderr ()
+{
+ echo "$*" >&2
+}
+
+echo "match stderr"
+test_result 0 --
+test_result_stderr foo
+test_run echo_stderr foo
+
+echo
+
+result_filter ()
+{
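+ # The 3-digit pattern must be applied first; otherwise "666" would
+ # be rewritten by the 2-digit pattern as "NUM26"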
+ sed -e 's#[0-9][0-9][0-9]#NUM3#g' \
+ -e 's#[0-9][0-9]#NUM2#g'
+}
+
+test_result 0 NUM2
+test_run echo 42
+
+test_result 0 NUM3
+test_run echo 666
+
+echo
+
+test_wrapper ()
+{
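+ # Replace command execution entirely; the command passed to
+ # test_run does not need to exist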
+ echo "output: $*"
+}
+
+test_result 0 "output: foobar"
+test_run foobar
+
+echo
@@ -348,7 +348,8 @@ do_skip
test_run pdbg -b fake -p0 probe
-test_result 1 <<EOF
+test_result 1 --
+test_result_stderr <<EOF
No processor(s) selected
Use -p or -a to select processor(s)
EOF
@@ -357,7 +358,8 @@ do_skip
test_run pdbg -b fake -c0 probe
-test_result 1 <<EOF
+test_result 1 --
+test_result_stderr <<EOF
No processor(s) selected
Use -p or -a to select processor(s)
EOF
@@ -366,7 +368,8 @@ do_skip
test_run pdbg -b fake -t0 probe
-test_result 1 <<EOF
+test_result 1 --
+test_result_stderr <<EOF
No processor(s) selected
Use -p or -a to select processor(s)
EOF
@@ -375,7 +378,8 @@ do_skip
test_run pdbg -b fake -c0 -t0 probe
-test_result 1 <<EOF
+test_result 1 --
+test_result_stderr <<EOF
No chip(s) selected
Use -c or -a to select chip(s)
EOF
@@ -469,7 +473,8 @@ do_skip
test_run pdbg -b fake -p1,3 -p5 -p7-9 -c1 -c3 -c5 -t1 probe
-test_result 1 <<EOF
+test_result 1 --
+test_result_stderr <<EOF
Value 100 larger than max 63
Failed to parse '-p 100'
EOF
@@ -478,7 +483,8 @@ do_skip
test_run pdbg -b fake -p100 probe
-test_result 1 <<EOF
+test_result 1 --
+test_result_stderr <<EOF
Value 100 larger than max 23
Failed to parse '-c 100'
EOF
@@ -487,7 +493,8 @@ do_skip
test_run pdbg -b fake -c100 probe
-test_result 1 <<EOF
+test_result 1 --
+test_result_stderr <<EOF
Value 100 larger than max 7
Failed to parse '-t 100'
EOF
test_setup - Register hooks to be called before running tests
test_cleanup - Register hooks to be called after running tests

test_result - Will only match the output on stdout
test_result_stderr - Match the output on stderr

The following functions can be overridden:

test_wrapper - To run test commands in a special context
result_filter - To match varying output (typically using sed)

Signed-off-by: Amitay Isaacs <amitay@ozlabs.org>
---
 tests/driver.sh         | 150 ++++++++++++++++++++++++++++++++++++++--
 tests/test_driver.sh    |  46 ++++++++++++
 tests/test_selection.sh |  21 ++++--
 3 files changed, 203 insertions(+), 14 deletions(-)