author     Chris Johns <chrisj@rtems.org>  2017-04-04 08:11:24 +1000
committer  Chris Johns <chrisj@rtems.org>  2017-04-04 08:24:22 +1000
commit     258bda306ba25741624488d24a2785c055a9fab0 (patch)
tree       66301d9e50cf6dbc08539b441cab8b41ffe2f895 /tools/build
parent     bsp/qoriq: Fix L1 cache flush (diff)
download   rtems-258bda306ba25741624488d24a2785c055a9fab0.tar.bz2
testsuite: Add a common test configuration. Fix configure.ac and Makefile.am errors.
- Add a top level test configuration file for test states that are common
  to all BSPs. This saves adding a test configuration (tcfg) file for
  every BSP.
- Add the test states 'user-input' and 'benchmark'. This lets 'rtems-test'
  stop the test rather than waiting for a timeout or letting a benchmark
  run without the user asking for it to run.
- Implement rtems-test-check in Python to make it faster. The shell script
  had grown to a point it was noticeably slowing the build down.
- Fix the configure.ac and Makefile.am files for a number of the test
  directories. The files are difficult to keep in sync with the number of
  tests, and mistakes can happen, such as tests being left out of the
  build; the test fsrofs01 is an example. Also there was a mix of SUBDIRS
  and _SUBDIRS being used and only _SUBDIRS should be used.
- Fix the test fsrofs01 so it compiles.

Closes #2963.
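For illustration only, a sketch of what the new common test configuration
might contain. The file name (testdata/rtems.tcfg) and the 'state: test-name'
line format come from the scripts in the diff below; every test name here is
invented.

    #
    # Test states common to all BSPs (illustrative entries only).
    # Each line is '<state>: <test-name>'.
    #
    exclude: spsometest01
    expected-fail: spsometest02
    user-input: spsometest03
    indeterminate: spsometest04
    benchmark: spsometest05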
Diffstat (limited to 'tools/build')
-rwxr-xr-x  tools/build/rtems-test-check            81
-rwxr-xr-x  tools/build/rtems-test-check-py        116
-rwxr-xr-x  tools/build/rtems-testsuite-autostuff   49
3 files changed, 230 insertions, 16 deletions
diff --git a/tools/build/rtems-test-check b/tools/build/rtems-test-check
index 923af49306..988556d59e 100755
--- a/tools/build/rtems-test-check
+++ b/tools/build/rtems-test-check
@@ -53,17 +53,29 @@ case ${mode} in
esac
#
+# Read the common settings first.
+#
+if [ -f $includepath/testdata/rtems.tcfg ]; then
+ testdata="$includepath/testdata/rtems.tcfg $testdata"
+fi
+
+#
# If there is no testdata all tests are valid and must pass.
#
-if test -f $testdata; then
+if [ ! -z "$testdata" ]; then
excluded_tests=""
expected_fails=""
+ user_inputs=""
indeterminates=""
+ benchmarks=""
while [ ! -z "$testdata" ];
do
for td in $testdata;
do
+ if [ ! -f $td ]; then
+ continue
+ fi
ntd=""
exec 3<& 0
exec 0<$td
@@ -87,10 +99,18 @@ if test -f $testdata; then
line=$(echo $line | sed -e 's/expected-fail://g;s/[[:blank:]]//g')
expected_fails="${expected_fails} $line"
;;
+ user-input)
+ line=$(echo $line | sed -e 's/user-input://g;s/[[:blank:]]//g')
+ user_inputs="${user_inputs} $line"
+ ;;
indeterminate)
line=$(echo $line | sed -e 's/indeterminate://g;s/[[:blank:]]//g')
indeterminates="${indeterminates} $line"
;;
+ benchmark)
+ line=$(echo $line | sed -e 's/benchmark://g;s/[[:blank:]]//g')
+ benchmarks="${benchmarks} $line"
+ ;;
*)
echo "error: invalid test state: $state in $td" 1>&2
echo "INVALID-TEST-DATA"
@@ -119,26 +139,55 @@ if test -f $testdata; then
fi
;;
flags)
- allow="no"
- for et in ${expected_fails};
+ allow="yes"
+ for et in ${excluded_tests};
do
if test ${t} = ${et}; then
- allow="yes"
+ allow="no"
fi
done
- if test ${allow} = yes; then
- output="-DTEST_STATE_EXPECTED_FAIL=1"
- fi
- allow="no"
- for it in ${indeterminates};
- do
- if test ${t} = ${it}; then
- allow="yes"
+ if test ${allow} = yes; then
+ allow="no"
+ for et in ${expected_fails};
+ do
+ if test ${t} = ${et}; then
+ allow="yes"
+ fi
+ done
+ if test ${allow} = yes; then
+ output="-DTEST_STATE_EXPECTED_FAIL=1"
fi
- done
- if test ${allow} = yes; then
- output="${output} -DTEST_STATE_INDETERMINATE=1"
- fi
+ allow="no"
+ for ut in ${user_inputs};
+ do
+ if test ${t} = ${ut}; then
+ allow="yes"
+ fi
+ done
+ if test ${allow} = yes; then
+ output="-DTEST_STATE_USER_INPUT=1"
+ fi
+ allow="no"
+ for it in ${indeterminates};
+ do
+ if test ${t} = ${it}; then
+ allow="yes"
+ fi
+ done
+ if test ${allow} = yes; then
+ output="${output} -DTEST_STATE_INDETERMINATE=1"
+ fi
+ allow="no"
+ for bt in ${benchmarks};
+ do
+ if test ${t} = ${bt}; then
+ allow="yes"
+ fi
+ done
+ if test ${allow} = yes; then
+ output="${output} -DTEST_STATE_BENCHMARK=1"
+ fi
+ fi
;;
*)
echo "error: invalid mode" 1>&2
diff --git a/tools/build/rtems-test-check-py b/tools/build/rtems-test-check-py
new file mode 100755
index 0000000000..e6bf29b7cf
--- /dev/null
+++ b/tools/build/rtems-test-check-py
@@ -0,0 +1,116 @@
+#! /usr/bin/env python
+#
+# Copyright 2017 Chris Johns <chrisj@rtems.org>
+# All rights reserved
+#
+
+#
+# Python version the rtems-test-check script.
+#
+
+from __future__ import print_function
+import os.path
+import sys
+
+def eprint(*args, **kwargs):
+ print(*args, file=sys.stderr, **kwargs)
+
+#
+# Arguments. Keep it simple.
+#
+if len(sys.argv) < 4:
+ eprint('error: invalid command line')
+ print('INVALID-TEST-DATA')
+ sys.exit(2)
+
+mode = sys.argv[1]
+testconfig = [sys.argv[2]]
+includepath = sys.argv[3]
+bsp = sys.argv[4]
+tests = sys.argv[5:]
+
+#
+# Handle the modes.
+#
+if mode == 'exclude':
+ pass
+elif mode == 'flags':
+ if len(tests) != 1:
+ eprint('error: test count not 1 for mode: %s' % (mode))
+ print('INVALID-TEST-DATA')
+ sys.exit(1)
+else:
+ eprint('error: invalid mode: %s' % (mode))
+ print('INVALID-TEST-DATA')
+ sys.exit(1)
+
+#
+# Common RTEMS testsuite configuration. Load first.
+#
+rtems_testdata = os.path.join(includepath, 'testdata', 'rtems.tcfg')
+if os.path.exists(rtems_testdata):
+ testconfig.insert(0, rtems_testdata)
+
+states = ['exclude',
+ 'expected-fail',
+ 'user-input',
+ 'indeterminate',
+ 'benchmark']
+defines = { 'expected-fail' : '-DTEST_STATE_EXPECTED_FAIL=1',
+ 'user-input' : '-DTEST_STATE_USER_INPUT=1',
+ 'indeterminate' : '-DTEST_STATE_INDETERMINATE=1',
+ 'benchmark' : '-DTEST_STATE_BENCHMARK=1' }
+output = []
+testdata = {}
+
+def clean(line):
+ line = line[0:-1]
+ b = line.find('#')
+ if b >= 0:
+ line = line[1:b]
+ return line.strip()
+
+#
+# Load the test data.
+#
+for tc in range(0, len(testconfig)):
+ if not os.path.exists(testconfig[tc]):
+ continue
+ with open(testconfig[tc]) as f:
+ tdata = [clean(l) for l in f.readlines()]
+ lc = 0
+ for line in tdata:
+ lc += 1
+ ls = [s.strip() for s in line.split(':')]
+ if len(line) == 0:
+ continue
+ if len(ls) != 2:
+ eprint('error: syntax error: %s:%d' % (tc, lc))
+ print('INVALID-TEST-DATA')
+ sys.exit(1)
+ state = ls[0]
+ test = ls[1]
+ if state == 'include':
+ testconfig.insert(td, test)
+ elif state in states:
+ if state not in testdata:
+ testdata[state] = [test]
+ else:
+ testdata[state] += [test]
+ else:
+ eprint('error: invalid test state: %s in %s:%d' % (state, tc, lc))
+ print('INVALID-TEST-DATA')
+ sys.exit(1)
+
+for test in tests:
+ if mode == 'exclude':
+ if 'exclude' not in testdata or test not in testdata['exclude']:
+ output += [test]
+ elif mode == 'flags':
+ for state in states:
+ if state != 'exclude' and state in testdata and test in testdata[state]:
+ output += [defines[state]]
+
+print(' '.join(sorted(set(output))))
+
+sys.exit(0)
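Judging from the argument parsing above, the Python checker takes a mode, a
BSP tcfg file, the include path, the BSP name and then the tests. A hedged
usage sketch; the paths, BSP name and test names are invented:

    # 'exclude' mode prints the tests that are not excluded for the BSP.
    ./rtems-test-check-py exclude mybsp.tcfg /path/to/testsuites mybsp \
        test01 test02 test03
    # 'flags' mode takes exactly one test and prints its TEST_STATE defines,
    # e.g. -DTEST_STATE_EXPECTED_FAIL=1, or nothing if no state applies.
    ./rtems-test-check-py flags mybsp.tcfg /path/to/testsuites mybsp test02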
diff --git a/tools/build/rtems-testsuite-autostuff b/tools/build/rtems-testsuite-autostuff
new file mode 100755
index 0000000000..8f298bac3c
--- /dev/null
+++ b/tools/build/rtems-testsuite-autostuff
@@ -0,0 +1,49 @@
+#! /usr/bin/env python
+
+#
+# Copyright 2017 Chris Johns <chrisj@rtems.org>
+# All rights reserved
+#
+
+#
+# Create the testsuite's configure.am and Makefile.am from the directories
+# found. This does not handle any conditional functionality that may be needed.
+#
+
+from __future__ import print_function
+import os
+import os.path
+import sys
+
+def eprint(*args, **kwargs):
+ print(*args, file = sys.stderr, **kwargs)
+
+def die(*args, **kwargs):
+ print(*args, file = sys.stderr, **kwargs)
+ sys.exit(1)
+
+if len(sys.argv) != 2:
+ die('error: just provide the path to the test directory')
+
+testdir = sys.argv[1]
+
+if not os.path.exists(testdir):
+ die('error: not found: %s' % (testdir))
+if not os.path.isdir(testdir):
+ die('error: not a directory: %s' % (testdir))
+
+excludes = ['autom4te.cache']
+
+tests = sorted([t for t in os.listdir(testdir)
+ if os.path.isdir(os.path.join(testdir, t)) \
+ and t not in excludes \
+ and os.path.exists(os.path.join(testdir, t, 'Makefile.am'))])
+
+configure = ['AC_CONFIG_FILES([Makefile'] + ['%s/Makefile' % (t) for t in tests] + ['])']
+makefile = ['_SUBDIRS ='] + ['_SUBDIRS += %s' % (t) for t in tests]
+
+print(os.linesep.join(configure))
+print()
+print(os.linesep.join(makefile))
+
+sys.exit(0)
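The generator walks the given testsuite directory and prints an
AC_CONFIG_FILES fragment for configure.ac followed by a _SUBDIRS list for
Makefile.am. A hedged run, assuming a directory that contains only the
invented tests footest01 and footest02, each with its own Makefile.am:

    $ ./rtems-testsuite-autostuff testsuites/footests
    AC_CONFIG_FILES([Makefile
    footest01/Makefile
    footest02/Makefile
    ])

    _SUBDIRS =
    _SUBDIRS += footest01
    _SUBDIRS += footest02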