# Ubuntu Testing Automation Harness
# Copyright 2012 Canonical Ltd.
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Testsuite specific code."""
import jsonschema
import os
import yaml
from utah import logger
from utah.client import exceptions
from utah.client.common import (
CMD_TS_BUILD,
CMD_TS_CLEANUP,
CMD_TS_SETUP,
DEFAULT_TSCONTROL,
DEFAULT_TSLIST,
chdir,
do_nothing,
mkdir,
parse_control_file,
run_cmd,
)
from utah.client.testcase import TestCase
def _parse_runlist_file(runlist_file, schema):
    """Parse a tslist.run runlist file and check it against a schema.

    :param runlist_file: Path to the YAML runlist file.
    :param schema: JSON schema the parsed document must satisfy
        (the runlist schemas used by callers are arrays of objects).
    :returns: Parsed data from the runlist
    :rtype: list
    :raises exceptions.MissingFile: if ``runlist_file`` does not exist.
    :raises exceptions.ValidationError: if the parsed content fails
        schema validation.
    """
    if not os.path.exists(runlist_file):
        raise exceptions.MissingFile('File not found: {}'.format(runlist_file))
    with open(runlist_file, 'r') as fp:
        # safe_load instead of load: runlists are plain data, and a bare
        # yaml.load can construct arbitrary Python objects from an
        # untrusted file (it is also deprecated without an explicit
        # Loader argument in modern PyYAML).
        data = yaml.safe_load(fp)
    try:
        jsonschema.validate(data, schema)
    except jsonschema.ValidationError as exception:
        raise exceptions.ValidationError(
            'Test suite runlist invalid: {!r}\n'
            'Detailed information: {}'.format(runlist_file, exception))
    return data
class TestSuite(object):
    """Base class describing a test suite.

    A suite lives in a directory under the runner's testsuite directory.
    It carries optional build/setup/cleanup commands and a timeout taken
    from a control file, plus a list of TestCase objects built from a
    runlist file.  Run status moves through the states listed below and
    is persisted via the runner so interrupted runs can resume.
    """

    # Shell command used to build the suite before tests run
    # (populated from the control file via _initialize_control_data).
    build_cmd = None
    # Suite timeout; the runner's timeout takes precedence when set.
    timeout = None
    # Shell command run once before the test cases.
    ts_setup = None
    # Shell command run once after the test cases.
    ts_cleanup = None
    # Path to the suite's control file, or None when there is none.
    control_file = None
    # status is one of:
    # 'NOTRUN', 'BUILD', 'SETUP', 'INPROGRESS', 'CLEANUP', and 'DONE'
    status = "NOTRUN"
    # NOTE(review): not referenced anywhere in this class -- set_status()
    # calls self.runner.save_state() directly.  Confirm whether external
    # code still assigns/uses this callback before removing it.
    save_state_callback = do_nothing

    # JSON schema for the runlist file: a non-empty array of entries,
    # each naming a test and optionally carrying per-test overrides.
    RUNLIST_SCHEMA = {
        '$schema': 'http://json-schema.org/draft-04/schema#',
        'type': 'array',
        'minItems': 1,
        'items': {
            'type': 'object',
            'properties': {
                'test': {'type': 'string'},
                'timeout': {
                    'type': 'integer',
                    'minimum': 0,
                },
                'build_cmd': {'type': 'string'},
                'command': {'type': 'string'},
                'run_as': {'type': 'string'},
                'overrides': {
                    'type': 'object',
                    'properties': {
                        'timeout': {
                            'type': 'integer',
                            'minimum': 0,
                        },
                        'build_cmd': {'type': 'string'},
                        'command': {'type': 'string'},
                        'run_as': {'type': 'string'},
                    },
                    'minProperties': 1,
                    'additionalProperties': False,
                },
            },
            'required': ['test'],
            'additionalProperties': False,
        },
    }

    # JSON schema for the suite-level control file; all keys optional.
    CONTROL_SCHEMA = {
        '$schema': 'http://json-schema.org/draft-04/schema#',
        'type': 'object',
        'properties': {
            'timeout': {
                'type': 'integer',
                'minimum': 0,
            },
            'build_cmd': {'type': 'string'},
            'ts_setup': {'type': 'string'},
            'ts_cleanup': {'type': 'string'},
        },
        'additionalProperties': False,
    }

    def __init__(self, name, runner,
                 runlist_file=DEFAULT_TSLIST, includes=None, excludes=None,
                 skip_runlist=False):
        """Load control data and (optionally) the runlist for the suite.

        :param name: Directory name of the suite under
            ``runner.testsuitedir``.
        :param runner: Owning runner object; supplies ``testsuitedir``,
            ``timeout`` and the shared result aggregator.
        :param runlist_file: Runlist filename, joined under ``name``.
        :param includes: If non-empty, only tests named here are added.
        :param excludes: Tests named here are skipped.
        :param skip_runlist: When True, do not parse the runlist at all
            (used by DynamicTestSuite, which discovers tests at runtime).
        """
        self.path = os.path.join(runner.testsuitedir, name)
        self.runner = runner
        self.timeout = runner.timeout
        self.name = name
        self.tests = []
        self.passes = self.failures = self.errors = 0
        mkdir(self.path)
        chdir(self.path)
        # NOTE(review): after chdir(self.path) these relative joins
        # resolve to <self.path>/<name>/... -- i.e. the suite content is
        # expected one directory deeper; confirm against the layout the
        # runner provisions.
        control_file = os.path.join(name, DEFAULT_TSCONTROL)
        if os.path.exists(control_file):
            self.control_file = control_file
        self.runlist_file = os.path.join(name, runlist_file)
        self._initialize_control_data(control_file)
        if not skip_runlist:
            tests = _parse_runlist_file(self.runlist_file, self.RUNLIST_SCHEMA)
            for test in tests:
                self._add_test(test, includes, excludes)

    def _initialize_control_data(self, control_file):
        # Apply build_cmd/ts_setup/ts_cleanup/timeout from the control
        # file onto this instance via __dict__.update.
        # NOTE(review): the control_file parameter is unused; the method
        # reads self.control_file (set in __init__) instead.
        if self.control_file is not None:
            try:
                control_data = parse_control_file(self.control_file,
                                                  self.CONTROL_SCHEMA)
                if self.timeout is not None:
                    # A runner-supplied timeout wins over the control
                    # file's own value.
                    control_data['timeout'] = self.timeout
                self.__dict__.update(control_data)
            except exceptions.YAMLEmptyFile:
                # Skip schema validation for empty file
                # Schema allows an empty dictionary, but not None
                control_data = None
            except jsonschema.ValidationError as exception:
                raise exceptions.ValidationError(
                    '{!r} test suite control file invalid: {!r}\n'
                    'Detailed information: {}'
                    .format(self.name, self.control_file, exception))

    def _add_test(self, test, incs, excs):
        # Build a TestCase from one runlist entry, honoring the
        # include/exclude filters passed to __init__.
        name = test['test']
        if (incs and name not in incs) or (excs and name in excs):
            return
        tc = TestCase(name, self, test.get('command'))
        # Override keys may appear either at the entry's top level or
        # inside an explicit 'overrides' mapping; values under
        # 'overrides' win on conflict (dict.update below).
        runlist_properties = self.RUNLIST_SCHEMA['items']['properties']
        override_properties = \
            runlist_properties['overrides']['properties'].keys()
        overrides = {}
        for property_name in override_properties:
            if property_name in test:
                overrides[property_name] = test[property_name]
        if 'overrides' in test:
            if self.timeout is not None:
                # Runner-supplied timeout also wins over a per-test one.
                test['overrides']['timeout'] = self.timeout
            overrides.update(test['overrides'])
        tc.process_overrides(overrides)
        self.tests.append(tc)

    def __str__(self):
        return "{}: {}".format(self.control_file, self.runlist_file)

    def set_status(self, status):
        """Set the status and call the save state callback."""
        self.status = status
        self.runner.save_state()

    def build(self):
        """Run build, but only if we haven't started a run yet."""
        if self.status == 'NOTRUN':
            self.set_status('BUILD')
            logger.log('Testsuite {} build'.format(self.name))
            cmd_result = run_cmd(
                self.build_cmd, cwd=self.name, cmd_type=CMD_TS_BUILD)
            self.runner.result.add_result(cmd_result)

    def setup(self):
        """Run ts_setup, but only if build() has just passed."""
        if self.status == 'BUILD' and self.runner.result.status == 'PASS':
            self.set_status('SETUP')
            logger.log('Testsuite {} setup'.format(self.name))
            cmd_result = run_cmd(
                self.ts_setup, cwd=self.name, cmd_type=CMD_TS_SETUP)
            self.runner.result.add_result(cmd_result)

    def cleanup(self):
        """Run ts_cleanup after a run."""
        if self.status == 'INPROGRESS':
            self.set_status('CLEANUP')
            logger.log('Testsuite {} cleanup'.format(self.name))
            cmd_result = run_cmd(
                self.ts_cleanup, cwd=self.name, cmd_type=CMD_TS_CLEANUP)
            self.runner.result.add_result(cmd_result)

    def run(self, rerun=False):
        """Run the complete test suite.

        This includes any build, setup, and cleanup commands, as well as
        all test cases (including build, setup, and cleanup.)

        :param rerun: When True, skip the suite-level build/setup steps
            (a resumed run); individual tests still check their own
            completion state.
        :returns: Whether to keep running tests (True) or reboot (False)
        :rtype: bool
        """
        # we gather this info from the testcases
        self.passes = self.errors = self.failures = 0
        # Return value to indicate whether processing of a Runner should
        # continue. This is to avoid a shutdown race on reboot cases.
        keep_going = True
        # Work from the testsuite directory
        chdir(self.path)
        # keep track of which testsuite we are running tests in
        self.runner.result.testsuite = self.path
        if not rerun:
            self.build()
            self.setup()
        # Always enter this loop since test.run() will handle checking if
        # the test has already been run.
        # NOTE(review): status is assigned directly here rather than via
        # set_status(), so state is not persisted at this transition --
        # confirm that is intentional.
        self.status = "INPROGRESS"
        if self.runner.result.status == 'PASS':
            for test in self.tests:
                keep_going = test.run(rerun)
                if test.summary == 'PASS':
                    self.passes += 1
                elif test.summary == 'FAIL':
                    self.failures += 1
                elif test.summary == 'ERROR':
                    self.errors += 1
                # A False return means a reboot is pending; stop before
                # cleanup so the suite resumes after restart.
                if not keep_going:
                    return keep_going
        self.cleanup()
        if self.status == 'CLEANUP':
            self.set_status("DONE")
        return keep_going

    def add_test(self, tests):
        """Add a single test or list of tests to this suite.

        A non-iterable, non-TestCase argument is silently ignored.
        """
        if isinstance(tests, TestCase):
            self.tests.append(tests)
        else:
            try:
                for test in tests:
                    self.add_test(test)
            except TypeError:
                pass

    def count_tests(self):
        """Return the number of test cases in the suite."""
        return len(self.tests)

    def load_state(self, state):
        """Restore our state from the supplied dictionary.

        Requires that the fieldnames in the dictionary match the class
        properties.

        :param state: Dictionary previously produced by save_state().
        """
        # TODO: do this explicitly
        self.__dict__.update(state)
        self.tests = []
        for state_test in state['tests']:
            test = TestCase(state_test['name'], self)
            test.load_state(state_test)
            self.tests.append(test)

    def save_state(self):
        """Return a dictionary representing the suite's state.

        The dictionary round-trips through load_state(): per-test state
        is nested under the 'tests' key.
        """
        state = {
            'name': self.name,
            'status': self.status,
            'passes': self.passes,
            'failures': self.failures,
            'errors': self.errors,
            'tests': [],
            'ts_setup': self.ts_setup,
            'ts_cleanup': self.ts_cleanup,
            'build_cmd': self.build_cmd,
            'timeout': self.timeout,
        }
        for test in self.tests:
            state['tests'].append(test.save_state())
        return state

    def is_done(self):
        """Determine if the suite is done.

        This might mean that something has failed.
        Used by Runner to determine if the suite needs to be re-run on
        resume.

        :returns: Whether the test suite is finished (done or cleaned up)
        :rtype: bool
        """
        return self.status == 'DONE' or self.status == 'CLEANUP'

    def get_next_test(self):
        """Return the next test to be run, or None if all are done.

        Mainly used for debugging.
        """
        test = None
        for t in self.tests:
            if not t.is_done():
                test = t
                break
        return test
class DynamicTestSuite(TestSuite):
    """A test suite whose test cases are discovered at runtime.

    Instead of a static runlist, a tslist.auto file supplies one or more
    patterns: each pattern pairs a discovery command (whose stdout lines
    name the tests) with a command template used to execute every
    discovered test.
    """

    # JSON schema for tslist.auto: a non-empty array of discovery
    # patterns, each requiring a discovery command and a test command.
    RUNLIST_SCHEMA = {
        '$schema': 'http://json-schema.org/draft-04/schema#',
        'type': 'array',
        'minItems': 1,
        'items': {
            'type': 'object',
            'properties': {
                'discovery_cmd': {'type': 'string'},
                'test_cmd': {'type': 'string'},
                'timeout': {
                    'type': 'integer',
                    'minimum': 0,
                },
                'run_as': {'type': 'string'},
            },
            'required': ['discovery_cmd', 'test_cmd'],
            'additionalProperties': False,
        },
    }

    def __init__(self, name, runner, autolist):
        """Create an empty testsuite.

        The test list stays empty until setup() succeeds and discovery
        populates it.

        :param name: Suite directory name.
        :param runner: Owning runner object.
        :param autolist: Filename of the tslist.auto pattern file.
        """
        TestSuite.__init__(self, name, runner, skip_runlist=True)
        autolist_path = os.path.join(self.path, name, autolist)
        self.patterns = _parse_runlist_file(autolist_path,
                                            self.RUNLIST_SCHEMA)

    def _add_dynamic(self, pattern, name):
        # The discovered test name may be interpolated into the command
        # template either positionally ({}) or by keyword ({test}).
        cmd = pattern['test_cmd'].format(name, test=name)
        # Write a synthetic tc_control file so the dynamic test looks
        # like an ordinary, statically declared test case.
        tc_control = os.path.join(self.path, self.name, name, 'tc_control')
        mkdir(os.path.dirname(tc_control))
        control_lines = [
            'description: "{}"\n'.format(name),
            'dependencies: None\n',
            'action: Execute {}\n'.format(cmd),
            'expected_results: Zero return code from command\n',
            'command: {}\n'.format(cmd),
        ]
        with open(tc_control, 'w') as control:
            control.writelines(control_lines)
        self.tests.append(TestCase(name, self, cmd))

    def _add_tests(self, pattern):
        # Run the discovery command; every non-blank line of its stdout
        # names one test.  A non-zero return code aborts this pattern.
        result = run_cmd(pattern['discovery_cmd'], None,
                         cmd_type=CMD_TS_BUILD)
        self.runner.result.add_result(result)
        if result['returncode']:
            return
        for line in result['stdout'].split('\n'):
            test_name = line.strip()
            if test_name:
                self._add_dynamic(pattern, test_name)

    def setup(self):
        """Implement dynamic portion of TestSuite.setup.

        After the static setup passes, run discovery for every pattern
        and register the resulting test cases.
        """
        TestSuite.setup(self)
        if self.runner.result.status == 'PASS':
            for pattern in self.patterns:
                self._add_tests(pattern)

    def save_state(self):
        """Implement dynamic portion of TestSuite.save_state.

        :return: state object (base suite state plus 'patterns')
        :rtype: dict
        """
        state = TestSuite.save_state(self)
        state['patterns'] = self.patterns
        return state