# Ubuntu Testing Automation Harness
# Copyright 2012 Canonical Ltd.
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Testcase specific code."""
import os
import jsonschema
from utah import logger
from utah.client.common import (
CMD_TC_BUILD,
CMD_TC_CLEANUP,
CMD_TC_SETUP,
CMD_TC_TEST,
do_nothing,
parse_control_file,
run_cmd,
)
from utah.client.exceptions import (
MissingFile,
ValidationError,
)
class TestCase(object):
    """Base class describing a test case.

    A case advances through a simple state machine; ``status`` is one of
    'NOTRUN', 'BUILD', 'SETUP', 'RUN', 'CLEANUP', or 'DONE'.
    """

    status = 'NOTRUN'
    summary = 'PASS'  # 'PASS', 'FAIL', or 'ERROR'
    build_cmd = None  # optional command run by build()
    tc_setup = None  # optional per-case setup command
    tc_cleanup = None  # optional per-case cleanup command
    reboot_callback = None
    save_state_callback = do_nothing
    type = 'userland'

    # JSON schema used to validate a case's tc_control file.
    CONTROL_SCHEMA = {
        '$schema': 'http://json-schema.org/draft-04/schema#',
        'type': 'object',
        'properties': {
            'description': {'type': 'string'},
            'dependencies': {'type': 'string'},
            'action': {'type': 'string'},
            'expected_results': {'type': 'string'},
            'type': {
                'type': 'string',
                'enum': ['userland']
            },
            'timeout': {
                'type': 'integer',
                'minimum': 0,
            },
            'build_cmd': {'type': 'string'},
            'command': {'type': 'string'},
            'run_as': {'type': 'string'},
            'tc_setup': {'type': 'string'},
            'tc_cleanup': {'type': 'string'},
            'reboot': {
                'type': 'string',
                'enum': ['always', 'pass', 'never'],
                'default': 'never',
            },
        },
        'required': [
            'description',
            'dependencies',
            'action',
            'expected_results',
            'command',
        ],
        'minProperties': 1,
    }

    def __init__(self, name, suite, command=None):
        """Build a TestCase from a control file's data.

        :param name: case name; also the directory (under the suite's
            path) expected to contain the ``tc_control`` file.
        :param suite: owning test suite; provides the base path, the
            default timeout and the runner used to record results.
        :param command: optional command overriding the control file's.
        :raises ValidationError: if the control file does not validate
            against CONTROL_SCHEMA.
        :raises MissingFile: if no ``tc_control`` file exists.
        """
        self.name = name
        self.suite = suite
        self.path = os.path.join(suite.path, suite.name, name)
        self.working_dir = self.path
        self.timeout = suite.timeout
        self.run_as = None
        # The following are testcase documentation and should all be provided
        # in a tc_control file or via the state file.
        self.description = None
        self.dependencies = None
        self.action = None
        self.expected_results = None
        tcfile = os.path.join(self.path, 'tc_control')
        if not os.path.exists(tcfile):
            raise MissingFile(
                'Control file, {}, does not exist.'.format(tcfile))
        try:
            control_data = parse_control_file(tcfile, self.CONTROL_SCHEMA)
            # A suite-level timeout takes precedence over whatever the
            # control file declares.
            if self.timeout is not None:
                control_data['timeout'] = self.timeout
            # .items() (not the Python-2-only .iteritems()) keeps this
            # working under both Python 2 and Python 3.
            for key, value in control_data.items():
                setattr(self, key, value)
        except jsonschema.ValidationError as exception:
            raise ValidationError(
                '{!r} test case control file invalid: {!r}\n'
                'Detailed information: {}'
                .format(self.name, tcfile, exception))
        if self.timeout is None:
            self.timeout = 0
        if command:
            self.command = command

    def __str__(self):
        return ("{}: {}, {}, {}".format(
            self.name, self.description, self.command, self.timeout))

    def set_status(self, status):
        """Set the status and call the save state callback."""
        self.status = status
        self.suite.runner.save_state()

    def build(self):
        """Run build, but only if we haven't started a run yet."""
        if self.status == 'NOTRUN':
            self.set_status('BUILD')
            logger.log('Testcase {} build'.format(self.name))
            cmd_result = run_cmd(
                self.build_cmd, cwd=self.working_dir, cmd_type=CMD_TC_BUILD)
            self.suite.runner.result.add_result(cmd_result)

    def setup(self):
        """Run tc_setup, but only if build() has just passed."""
        if self.status == 'BUILD' and \
                self.suite.runner.result.status == 'PASS':
            self.set_status('SETUP')
            logger.log('Testcase {} setup'.format(self.name))
            cmd_result = run_cmd(
                self.tc_setup, cwd=self.working_dir, cmd_type=CMD_TC_SETUP)
            self.suite.runner.result.add_result(cmd_result)

    def cleanup(self):
        """Run tc_cleanup after a run."""
        if self.status == 'RUN':
            self.set_status('CLEANUP')
            logger.log('Testcase {} cleanup'.format(self.name))
            cmd_result = run_cmd(
                self.tc_cleanup, cwd=self.working_dir,
                cmd_type=CMD_TC_CLEANUP)
            self.suite.runner.result.add_result(cmd_result)

    def run(self, rerun=False):
        """Run the complete test case.

        This includes any build, setup, and cleanup commands.

        :param rerun: when True, repeat setup but skip the build step.
        :returns: Whether to keep running tests (True) or reboot (False)
        :rtype: bool
        """
        if rerun:
            # reruns mean we should re-run the setup but skip the build step
            self.set_status('BUILD')
        elif self.is_done():
            # NOTE(review): a finished case returns the string 'PASS'
            # (truthy) rather than True as the docstring suggests;
            # preserved as-is since callers may compare against it.
            return 'PASS'
        # Return value to indicate whether processing of a TestSuite should
        # continue. This is to avoid a shutdown race on reboot cases.
        keep_going = True
        self.suite.runner.result.testcase = self.name
        self.build()
        self.setup()
        # if the setup fails don't run the test command
        if self.suite.runner.result.status != 'PASS':
            self.summary = 'ERROR'
            # Reset the shared runner status so later cases start clean;
            # the value returned here is therefore always 'PASS'.
            self.suite.runner.result.status = 'PASS'
            return self.suite.runner.result.status
        if self.status == 'SETUP':
            timeout = self.timeout or 0
            self.status = "RUN"
            self.suite.runner.save_state()
            # Testcase documentation attached to the command result.
            extra_info = {
                'description': self.description,
                'dependencies': self.dependencies,
                'action': self.action,
                'expected_results': self.expected_results,
            }
            logger.log('Testcase {} run'.format(self.name))
            cmd_result = run_cmd(
                self.command,
                timeout=timeout,
                cwd=self.working_dir,
                cmd_type=CMD_TC_TEST,
                run_as=self.run_as
            )
            self.suite.runner.result.add_result(
                cmd_result, extra_info=extra_info)
            # only if we haven't failed or errored so far do
            # we set the summary for the testcase.
            if self.summary == 'PASS':
                self.summary = self.suite.runner.result.status
        # Clean up whether 'command' failed or not.
        self.cleanup()
        need_reboot = False
        if (
            self.reboot == 'always' or
            (self.reboot == 'pass' and
             self.suite.runner.result.status == 'PASS')
        ):
            need_reboot = True
            # A reboot-triggering result should not fail the suite.
            self.suite.runner.result.status = 'PASS'
        if self.status == 'CLEANUP':
            self.status = "DONE"
            self.suite.runner.save_state()
        if need_reboot and self.suite.runner.reboot is not None:
            self.suite.runner.reboot()
            keep_going = False
        return keep_going

    def process_overrides(self, overrides):
        """Set override values from a TestSuite runlist for this test case."""
        for key, value in overrides.items():
            setattr(self, key, value)

    def load_state(self, state):
        """Restore state from the supplied dictionary.

        Requires that 'state' has the same fieldnames as the TestCase class.
        """
        for key, value in state.items():
            setattr(self, key, value)

    def save_state(self):
        """Return a dictionary representing the test's state."""
        state = {
            'name': self.name,
            'path': self.path,
            'command': self.command,
            'timeout': self.timeout,
            'status': self.status,
            'summary': self.summary,
            'build_cmd': self.build_cmd,
            'tc_setup': self.tc_setup,
            'tc_cleanup': self.tc_cleanup,
            'type': self.type,
            'description': self.description,
            'dependencies': self.dependencies,
            'action': self.action,
            'expected_results': self.expected_results,
        }
        return state

    def is_done(self):
        """Determine if the case is done.

        This might mean that something has failed.
        Used by suite to determine if the suite needs to be re-run on resume.

        :returns: Whether the test case is finished (done or cleaned up)
        :rtype: bool
        """
        return self.status == 'DONE' or self.status == 'CLEANUP'