With this test we add a new EXPECT_FAILURE: attribute to the script
syntax to indicate that a particular apitrace command in the script is
expected to fail. This requires manually invoking the Popen
constructor rather than using subprocess.check_output (which is
*close* to what we need but fails to assign the output when raising an
exception).
We also remove the globbing from the CMakeLists.txt file to get a
manual ordering of the tests (rather than executing in alphabetic
order).
-file (GLOB scripts RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.script)
+function (ADD_CLI_TEST)
+ cmake_parse_arguments(
+ TEST
+ # Options
+ ""
+ # One value args
+ "NAME"
+ # Multi value args
+ ""
+ ${ARGN}
+ )
-list (SORT scripts)
-
-foreach (script ${scripts})
if (APITRACE_EXECUTABLE)
add_test(
- NAME ${script}
- COMMAND python ${CMAKE_SOURCE_DIR}/cli_driver.py
- --apitrace ${APITRACE_EXECUTABLE}
- --apitrace-source ${APITRACE_SOURCE_DIR}
- ${CMAKE_CURRENT_SOURCE_DIR}/${script}
+ NAME ${TEST_NAME}
+ COMMAND
+ python ${CMAKE_SOURCE_DIR}/cli_driver.py
+ --apitrace ${APITRACE_EXECUTABLE}
+ --apitrace-source ${APITRACE_SOURCE_DIR}
+ ${CMAKE_CURRENT_SOURCE_DIR}/${TEST_NAME}
+endfunction ()
+
+add_cli_test(NAME "cli-diff-images.script")
+add_cli_test(NAME "cli-diff-images-mismatch.script")
written in this directory.
The tests in this directory are found in files with names matching
-*.script. The scripts are simple line-based commands with the
-following meanings based on the first word of each line:
+*.script by convention. The scripts must be listed explicitly in the
+CMakeLists.txt file. Each script consists of simple line-based
+commands with the following meanings (based on the first word of each
+line):
apitrace: Execute the current apitrace executable being tested
with the given arguments. If apitrace returns a
interpreted locally. If this fails for any reason
other than "file does not exist" the test will fail.
-If none of the commands in the script cause the test to fail, then the
-test will pass.
+Commands can be prefixed with "EXPECT_FAILURE:" to indicate that a
+command is expected to return a non-zero value. In this case, a return
+value of zero from the command will cause the test to fail.
+
+If none of the commands in the script cause the test to fail (as
+described above), then the test will pass.
--- /dev/null
+rm_and_mkdir ./tri-out
+apitrace dump-images -o ./tri-out/tri tri.trace
+EXPECT_FAILURE: apitrace diff-images -v ./tri-ref-mismatch ./tri-out
+expect "Comparing ./tri-ref-mismatch/tri0000000027.png and ./tri-out/tri0000000027.png ... MISMATCH\n"
def do_apitrace(self, args):
cmd = [self.options.apitrace] + args[1:]
- self.output = subprocess.check_output(cmd)
+ proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
+ self.output = proc.communicate()[0]
+
+ proc.wait()
+
+ if (self.expect_failure):
+ if (proc.returncode == 0):
+ fail("Command unexpectedly passed when expecting failure:\n " + " ".join(cmd))
+ else:
+ if (proc.returncode != 0):
+ fail("Command failed (returned non-zero):\n " + " ".join(cmd))
def do_expect(self, args):
expected = json.loads(" ".join(args[1:]))
script = open(cli_script, 'rt')
while True:
+
+ self.expect_failure = False
+
line = script.readline()
if (line == ''):
if (len(cmd) == 0):
continue
+ if (cmd[0] == 'EXPECT_FAILURE:'):
+ self.expect_failure = True
+ cmd.pop(0)
+
commands.get(cmd[0], self.unknown_command)(cmd)
def run(self):