Commit 0895f561 authored by Dylan Baker

log: add verbose logger

This adds a verbose logger option, similar to that of the old piglit
logger, but with some of the flair of the new one.

v2: - Use different method to set Log.__string
    - fix typo: env.concurrent corrected to env.verbose
    - resume uses stored verbosity settings
Signed-off-by: Dylan Baker <baker.dylan.c@gmail.com>
Reviewed-by: Ilia Mirkin <imirkin@alum.mit.edu>
parent e0169c44
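
The mechanism is essentially a format-string switch: in the default (terse) mode the logger keeps rewriting one carriage-return-terminated status line, while in verbose mode it first emits a permanent "result :: name" record and then the same status line. A minimal standalone sketch of that behaviour, reusing the two format strings from this commit but with purely illustrative field values:

    import sys

    # Terse mode: a single status line, overwritten in place via "\r".
    output = "[{percent}] {summary} {running}\r"

    verbose = True
    if verbose:
        # Verbose mode: prepend a newline-terminated "result :: name" record.
        output = "{result} :: {name}\n" + output

    sys.stdout.write(output.format(percent="0001/0100",            # illustrative values,
                                   summary="pass: 1",              # not from a real run
                                   running="Running Test(s): 0002",
                                   name="spec/example/some-test",
                                   result="pass"))
    # Flush explicitly, since the status line never ends in a newline.
    sys.stdout.flush()
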
@@ -355,7 +355,7 @@ class TestrunResult:
 class Environment:
     def __init__(self, concurrent=True, execute=True, include_filter=[],
-                 exclude_filter=[], valgrind=False, dmesg=False):
+                 exclude_filter=[], valgrind=False, dmesg=False, verbose=False):
         self.concurrent = concurrent
         self.execute = execute
         self.filter = []
@@ -363,6 +363,7 @@ class Environment:
         self.exclude_tests = set()
         self.valgrind = valgrind
         self.dmesg = dmesg
+        self.verbose = verbose
         """
         The filter lists that are read in should be a list of string objects,
@@ -433,12 +434,11 @@ class Test(object):
         Fully qualified test name as a string. For example,
         ``spec/glsl-1.30/preprocessor/compiler/keywords/void.frag``.
         '''
-        log_current = log.pre_log()
+        log_current = log.pre_log(path if env.verbose else None)
         # Run the test
         if env.execute:
             try:
-                log.log()
                 time_start = time.time()
                 dmesg.update_dmesg()
                 self._test_hook_execute_run()
@@ -462,7 +462,9 @@ class Test(object):
                 result['traceback'] = \
                     "".join(traceback.format_tb(sys.exc_info()[2]))
             test_result = result['result']
+
+            log.log(path, result['result'])
             log.post_log(log_current, result['result'])
             if 'subtest' in result and len(result['subtest']) > 1:
                 for test in result['subtest']:
                     result['result'] = result['subtest'][test]
@@ -470,9 +472,8 @@ class Test(object):
             else:
                 json_writer.write_dict_item(path, result)
         else:
-            test_result = 'dry-run'
-            log.log()
-            log.post_log(log_current, test_result)
+            log.log(path, 'dry-run')
+            log.post_log(log_current, 'dry-run')


 class Group(dict):
@@ -557,7 +558,7 @@ class TestProfile(object):
         '''
         self.prepare_test_list(env)
-        log = Log(len(self.test_list))
+        log = Log(len(self.test_list), env.verbose)

         def test(pair):
             """ Function to call test.execute from .map
......
@@ -32,7 +32,7 @@ class Log(object):
     total -- The total number of tests to run.

     """
-    def __init__(self, total):
+    def __init__(self, total, verbose):
         self.__total = total
         self.__complete = 0
         self.__running = []
@@ -42,17 +42,37 @@ class Log(object):
                                    'dmesg-warn', 'dmesg-fail', 'dry-run'])
         self.__summary = collections.defaultdict(lambda: 0)

+        self.__output = "[{percent}] {summary} {running}\r"
+        if verbose:
+            self.__output = "{result} :: {name}\n" + self.__output
+
     def _summary(self):
+        """ return a summary of the statuses """
         return ", ".join("{0}: {1}".format(k, self.__summary[k])
                          for k in sorted(self.__summary))

     def _running(self):
+        """ return running tests """
         return "Running Test(s): {}".format(
             " ".join([str(x).zfill(self.__pad) for x in self.__running]))

     def _percent(self):
-        return "[{0}/{1}]".format(str(self.__complete).zfill(self.__pad),
-                                  str(self.__total).zfill(self.__pad))
+        """ return the percentage of tests completed """
+        return "{0}/{1}".format(str(self.__complete).zfill(self.__pad),
+                                str(self.__total).zfill(self.__pad))
+
+    def __print(self, name, result):
+        """ Do the actual printing """
+        sys.stdout.write(self.__output.format(**{'percent': self._percent(),
+                                                 'running': self._running(),
+                                                 'summary': self._summary(),
+                                                 'name': name,
+                                                 'result': result}))
+        # Need to flush explicitly, otherwise it all gets buffered without a
+        # newline.
+        sys.stdout.flush()

     @synchronized_self
     def post_log(self, value, result):
@@ -74,23 +94,31 @@ class Log(object):
         self.__summary[result] += 1

     @synchronized_self
-    def log(self):
+    def log(self, name, result):
         """ Print to the screen

         Works by moving the cursor back to the front of the line and printing
         over it.

         """
-        sys.stdout.write("{0} {1} {2}\r".format(
-            self._percent(), self._summary(), self._running()))
-        # Need to flush explicitly, otherwise it all gets buffered without a
-        # newline.
-        sys.stdout.flush()
+        assert result in self.__summary_keys
+        self.__print(name, result)

     @synchronized_self
-    def pre_log(self):
-        """ Returns a new number to know what processes are running """
+    def pre_log(self, running=None):
+        """ Hook to run before log()

+        Returns a new number to know what processes are running, if running is
+        set it will print a running message for the test
+
+        Keyword Arguments:
+        running -- the name of a test to print is running. If Falsy then
+                   nothing will be printed. Default: None
+
+        """
+        if running:
+            self.__print(running, 'running')
+
         x = self.__generator.next()
         self.__running.append(x)
         return x
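
Taken together with the Test.execute changes above, each test now follows a three-step logging protocol: pre_log() reserves a running slot (and, in verbose mode, prints a "running" record), log() prints the result record, and post_log() retires the slot and updates the summary counters. A rough usage sketch, assuming the piglit tree is on the Python path and a Log built as in TestProfile.run(); the test name is illustrative:

    from framework.log import Log

    log = Log(2, True)                       # two tests total, verbose output

    name = "spec/example/some-test"          # illustrative test name
    slot = log.pre_log(name)                 # verbose: prints "running :: <name>" plus the status line
    # ... run the test here ...
    log.log(name, 'pass')                    # verbose: prints "pass :: <name>" plus the status line
    log.post_log(slot, 'pass')               # frees the slot and bumps the 'pass' count
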
@@ -29,15 +29,21 @@ from framework.log import Log
 valid_statuses = ('pass', 'fail', 'crash', 'warn', 'dmesg-warn',
                   'dmesg-fail', 'skip', 'dry-run')


-def test_initialize_log():
-    """ Test that Log initializes with """
-    log = Log(100)
+def test_initialize_log_terse():
+    """ Test that Log initializes with verbose=False """
+    log = Log(100, False)
     assert log


+def test_initialize_log_verbose():
+    """ Test that Log initializes with verbose=True """
+    log = Log(100, True)
+    assert log
+
+
 def test_pre_log_return():
     """ Test that pre_log returns a number """
-    log = Log(100)
+    log = Log(100, False)
     ret = log.pre_log()
     nt.assert_true(isinstance(ret, (IntType, FloatType, LongType)),
@@ -46,7 +52,7 @@ def test_pre_log_return():
 def test_post_log_increment_complete():
     """ Tests that Log.post_log() increments self.__complete """
-    log = Log(100)
+    log = Log(100, False)
     ret = log.pre_log()
     log.post_log(ret, 'pass')
     nt.assert_equal(log._Log__complete, 1,
@@ -56,7 +62,7 @@ def test_post_log_increment_complete():
 def check_post_log_increment_summary(stat):
     """ Test that passing a result to post_log works correctly """
-    log = Log(100)
+    log = Log(100, False)
     ret = log.pre_log()
     log.post_log(ret, stat)
     print log._Log__summary
@@ -77,7 +83,7 @@ def test_post_log_increment_summary():
 def test_post_log_removes_complete():
     """ Test that Log.post_log() removes finished tests from __running """
-    log = Log(100)
+    log = Log(100, False)
     ret = log.pre_log()
     log.post_log(ret, 'pass')
     nt.assert_not_in(ret, log._Log__running,
@@ -87,6 +93,6 @@ def test_post_log_removes_complete():
 @nt.raises(AssertionError)
 def test_post_log_increment_summary_bad():
     """ Only statuses in self.__summary_keys are valid for post_log """
-    log = Log(100)
+    log = Log(100, False)
     ret = log.pre_log()
     log.post_log(ret, 'fails')
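
The updated tests above only exercise construction with the new verbose argument; a natural follow-up, sketched here in the same nose style but not part of this commit, would cover the verbose pre_log() path as well:

    def test_pre_log_verbose_running():
        """ Sketch (not in this commit): pre_log(name) with verbose=True still reserves a slot """
        log = Log(100, True)
        ret = log.pre_log('spec/example/some-test')   # illustrative test name
        nt.assert_in(ret, log._Log__running)
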
@@ -43,7 +43,8 @@ def main():
                            include_filter=results.options['filter'],
                            execute=results.options['execute'],
                            valgrind=results.options['valgrind'],
-                           dmesg=results.options['dmesg'])
+                           dmesg=results.options['dmesg'],
+                           verbose=results.options['verbose'])

     # Change working directory to the piglit directory
     os.chdir(path.dirname(path.realpath(sys.argv[0])))
......
@@ -82,6 +82,10 @@ def main():
                         action="store_true",
                         help="Capture a difference in dmesg before and "
                              "after each test. Implies -1/--no-concurrency")
+    parser.add_argument("-v", "--verbose",
+                        action="store_true",
+                        help="Produce a line of output for each test before "
+                             "and after it runs")
     parser.add_argument("test_profile",
                         metavar="<Path to one or more test profile(s)>",
                         nargs='+',
@@ -115,7 +119,8 @@
                            include_filter=args.include_tests,
                            execute=args.execute,
                            valgrind=args.valgrind,
-                           dmesg=args.dmesg)
+                           dmesg=args.dmesg,
+                           verbose=args.verbose)

     # Change working directory to the root of the piglit directory
     piglit_dir = path.dirname(path.realpath(sys.argv[0]))
......