Commit b9c4f967 authored by Dylan Baker, committed by Eric Anholt

python: Convert tabs to spaces

PEP 8 specifies that all indents should be either 4 spaces or an
equivalent tab indent, but without mixing tabs and spaces, with a
preference for spaces over tabs because they are absolute. Tabs and
spaces should not be mixed because they lead to unexpected indent level
errors.

Currently piglit uses a mixture of spaces and tabs; this patch uses the
Python tool reindent.py to convert the tabs to spaces.
Signed-off-by: Dylan Baker <baker.dylan.c@gmail.com>
v2: Rebase against master, fix apparent unintentional unindentation of
    help text in piglit-summary.py (changes by anholt)
Reviewed-by: Eric Anholt <eric@anholt.net>
parent 80071932
......@@ -41,143 +41,143 @@ from threads import synchronized_self
import threading
# Public names exported by this module. The diff rendering duplicated every
# entry (removed/added copies of the same lines); list each name once.
__all__ = [
    'Environment',
    'checkDir',
    'loadTestProfile',
    'TestrunResult',
    'GroupResult',
    'TestResult',
    'TestProfile',
    'Group',
    'Test',
    'testBinDir',
]
class JSONWriter:
    '''
    Writes to a JSON file stream.

    JSONWriter is threadsafe.

    Example
    -------
    This call to ``json.dump``::

        json.dump(
            {
                'a': [1, 2, 3],
                'b': 4,
                'c': {
                    'x': 100,
                },
            },
            file,
            indent=JSONWriter.INDENT)

    is equivalent to::

        w = JSONWriter(file)
        w.open_dict()
        w.write_dict_item('a', [1, 2, 3])
        w.write_dict_item('b', 4)
        w.write_dict_item('c', {'x': 100})
        w.close_dict()

    which is also equivalent to::

        w = JSONWriter(file)
        w.open_dict()
        w.write_dict_item('a', [1, 2, 3])
        w.write_dict_item('b', 4)
        w.write_dict_key('c')
        w.open_dict()
        w.write_dict_item('x', 100)
        w.close_dict()
        w.close_dict()
    '''

    # Number of spaces per indent level in the emitted JSON.
    INDENT = 4

    def __init__(self, file):
        self.file = file
        self.__indent_level = 0
        self.__inhibit_next_indent = False
        self.__encoder = json.JSONEncoder(indent=self.INDENT)

        # self.__is_collection_empty
        #
        # A stack that indicates if the current collection is empty.
        #
        # When open_dict is called, True is pushed onto the stack. When
        # the first element is written to the newly opened dict, the
        # top of the stack is set to False. When close_dict is called,
        # the stack is popped.
        #
        # The top of the stack is element -1.
        self.__is_collection_empty = []

    @synchronized_self
    def __write_indent(self):
        # One-shot inhibit flag lets a dict value start on the same
        # line as its key (armed by write_dict_key).
        if self.__inhibit_next_indent:
            self.__inhibit_next_indent = False
            return
        else:
            i = ' ' * self.__indent_level * self.INDENT
            self.file.write(i)

    @synchronized_self
    def __write(self, obj):
        # Encode obj, then emit it line by line so every line is
        # re-indented to the writer's current indent level.
        lines = list(self.__encoder.encode(obj).split('\n'))
        n = len(lines)
        for i in range(n):
            self.__write_indent()
            self.file.write(lines[i])
            if i != n - 1:
                self.file.write('\n')

    @synchronized_self
    def open_dict(self):
        '''Start a JSON object; subsequent items nest one level deeper.'''
        self.__write_indent()
        self.file.write('{')
        self.__indent_level += 1
        self.__is_collection_empty.append(True)

    @synchronized_self
    def close_dict(self, comma=True):
        '''Close the innermost open JSON object.

        NOTE(review): the ``comma`` parameter is unused; kept so the
        call signature stays compatible with existing callers.
        '''
        self.__indent_level -= 1
        self.__is_collection_empty.pop()
        self.file.write('\n')
        self.__write_indent()
        self.file.write('}')

    @synchronized_self
    def write_dict_item(self, key, value):
        '''Write a complete ``key: value`` pair into the open dict.'''
        # Write key.
        self.write_dict_key(key)

        # Write value.
        self.__indent_level += 1
        self.__write(value)
        self.__indent_level -= 1

    @synchronized_self
    def write_dict_key(self, key):
        '''Write a dict key and leave the writer positioned for its value.'''
        # Write comma if this is not the initial item in the dict.
        if self.__is_collection_empty[-1]:
            self.__is_collection_empty[-1] = False
        else:
            self.file.write(',')

        self.file.write('\n')
        self.__write(key)
        self.file.write(': ')
        self.__inhibit_next_indent = True
# NOTE(review): the lines below are the second (post-conversion) copy of the
# JSONWriter class body as rendered by the commit diff; leading indentation
# was lost when the page was captured. Kept byte-identical apart from
# comment fixes.
'''
Writes to a JSON file stream
JSONWriter is threadsafe.
Example
-------
This call to ``json.dump``::
json.dump(
{
'a': [1, 2, 3],
'b': 4,
'c': {
'x': 100,
},
}
file,
indent=JSONWriter.INDENT)
is equivalent to::
w = JSONWriter(file)
w.open_dict()
w.write_dict_item('a', [1, 2, 3])
w.write_dict_item('b', 4)
w.write_dict_item('c', {'x': 100})
w.close_dict()
which is also equivalent to::
w = JSONWriter(file)
w.open_dict()
w.write_dict_item('a', [1, 2, 3])
w.write_dict_item('b', 4)
w.write_dict_key('c')
w.open_dict()
w.write_dict_item('x', 100)
w.close_dict()
w.close_dict()
'''
INDENT = 4
def __init__(self, file):
self.file = file
self.__indent_level = 0
self.__inhibit_next_indent = False
self.__encoder = json.JSONEncoder(indent=self.INDENT)
# self.__is_collection_empty
#
# A stack that indicates if the current collection is empty
#
# When open_dict is called, True is pushed onto the
# stack. When the first element is written to the newly
# opened dict, the top of the stack is set to False.
# When the close_dict is called, the stack is popped.
#
# The top of the stack is element -1.
#
# XXX: How does one attach docstrings to member variables?
#
self.__is_collection_empty = []
@synchronized_self
def __write_indent(self):
if self.__inhibit_next_indent:
self.__inhibit_next_indent = False
return
else:
i = ' ' * self.__indent_level * self.INDENT
self.file.write(i)
@synchronized_self
def __write(self, obj):
lines = list(self.__encoder.encode(obj).split('\n'))
n = len(lines)
for i in range(n):
self.__write_indent()
self.file.write(lines[i])
if i != n - 1:
self.file.write('\n')
@synchronized_self
def open_dict(self):
self.__write_indent()
self.file.write('{')
self.__indent_level += 1
self.__is_collection_empty.append(True)
@synchronized_self
def close_dict(self, comma=True):
self.__indent_level -= 1
self.__is_collection_empty.pop()
self.file.write('\n')
self.__write_indent()
self.file.write('}')
@synchronized_self
def write_dict_item(self, key, value):
# Write key.
self.write_dict_key(key)
# Write value.
self.__indent_level += 1
self.__write(value)
self.__indent_level -= 1
@synchronized_self
def write_dict_key(self, key):
# Write comma if this is not the initial item in the dict.
if self.__is_collection_empty[-1]:
self.__is_collection_empty[-1] = False
else:
self.file.write(',')
self.file.write('\n')
self.__write(key)
self.file.write(': ')
self.__inhibit_next_indent = True
#############################################################################
##### Helper functions
......@@ -185,22 +185,22 @@ class JSONWriter:
# Ensure the given directory exists
def checkDir(dirname, failifexists):
    '''
    Create ``dirname`` (and any missing parents) if it does not exist.

    If the directory already exists and ``failifexists`` is true, print
    an error message to stderr and exit the process with status 1.
    '''
    exists = True
    try:
        os.stat(dirname)
    except OSError as e:
        # ENOENT/ENOTDIR mean the path is absent; any other error (e.g.
        # EACCES) leaves ``exists`` True so the failifexists check fires.
        if e.errno == errno.ENOENT or e.errno == errno.ENOTDIR:
            exists = False

    if exists and failifexists:
        # Was a Python-2-only ``print >>sys.stderr`` statement; write
        # directly so the code parses under both Python 2 and 3 (the
        # extra '\n' reproduces the newline print appended).
        sys.stderr.write("%(dirname)s exists already.\nUse --overwrite "
                         "if you want to overwrite it.\n" % locals() + "\n")
        exit(1)

    try:
        os.makedirs(dirname)
    except OSError as e:
        # Another process may have created it in the meantime; EEXIST
        # is therefore not an error.
        if e.errno != errno.EEXIST:
            raise
# NOTE(review): the lines below are the second (post-conversion) copy of the
# checkDir body as rendered by the commit diff; leading indentation was lost
# when the page was captured. Code kept byte-identical (still Python-2
# ``print >>`` syntax, as in the original commit).
exists = True
try:
os.stat(dirname)
except OSError as e:
# ENOENT/ENOTDIR mean the path is absent.
if e.errno == errno.ENOENT or e.errno == errno.ENOTDIR:
exists = False
if exists and failifexists:
print >>sys.stderr, "%(dirname)s exists already.\nUse --overwrite if you want to overwrite it.\n" % locals()
exit(1)
try:
os.makedirs(dirname)
except OSError as e:
# EEXIST is benign: a concurrent creator won the race.
if e.errno != errno.EEXIST:
raise
if 'PIGLIT_BUILD_DIR' in os.environ:
testBinDir = os.environ['PIGLIT_BUILD_DIR'] + '/bin/'
......@@ -216,454 +216,454 @@ if 'PIGLIT_SOURCE_DIR' not in os.environ:
#############################################################################
class TestResult(dict):
    '''
    Result of a single test, stored as a plain dict of result fields.

    The diff rendering duplicated the ``pass`` statement; one suffices.
    '''
    pass
class GroupResult(dict):
    '''A node in the test-result tree; maps names to subgroups or results.'''

    def get_subgroup(self, path, create=True):
        '''
        Retrieve subgroup specified by path.

        For example, ``self.get_subgroup('a/b/c')`` will attempt to
        return ``self['a']['b']['c']``. If any subgroup along ``path``
        does not exist, then it will be created if ``create`` is true;
        otherwise, ``None`` is returned.
        '''
        group = self
        for subname in path.split('/'):
            if subname not in group:
                if create:
                    group[subname] = GroupResult()
                else:
                    return None
            group = group[subname]
            # Every intermediate node must itself be a GroupResult;
            # hitting a leaf (TestResult) mid-path is a caller error.
            assert(isinstance(group, GroupResult))
        return group

    @staticmethod
    def make_tree(tests):
        '''
        Convert a flat dict of test results to a hierarchical tree.

        ``tests`` is a dict whose items have form ``(path, TestResult)``,
        where path is a string with form ``group1/group2/.../test_name``.

        Return a tree whose leaves are the values of ``tests`` and
        whose nodes, which have type ``GroupResult``, reflect the
        paths in ``tests``.
        '''
        root = GroupResult()
        for (path, result) in tests.items():
            group_path = os.path.dirname(path)
            test_name = os.path.basename(path)
            group = root.get_subgroup(group_path)
            group[test_name] = TestResult(result)
        return root
# NOTE(review): the lines below are the second (post-conversion) copy of the
# GroupResult class body as rendered by the commit diff; leading indentation
# was lost when the page was captured. Kept byte-identical.
def get_subgroup(self, path, create=True):
'''
Retrieve subgroup specified by path
For example, ``self.get_subgroup('a/b/c')`` will attempt to
return ``self['a']['b']['c']``. If any subgroup along ``path``
does not exist, then it will be created if ``create`` is true;
otherwise, ``None`` is returned.
'''
group = self
for subname in path.split('/'):
if subname not in group:
if create:
group[subname] = GroupResult()
else:
return None
group = group[subname]
assert(isinstance(group, GroupResult))
return group
@staticmethod
def make_tree(tests):
'''
Convert a flat dict of test results to a hierarchical tree
``tests`` is a dict whose items have form ``(path, TestResult)``,
where path is a string with form ``group1/group2/.../test_name``.
Return a tree whose leaves are the values of ``tests`` and
whose nodes, which have type ``GroupResult``, reflect the
paths in ``tests``.
'''
root = GroupResult()
for (path, result) in tests.items():
group_path = os.path.dirname(path)
test_name = os.path.basename(path)
group = root.get_subgroup(group_path)
group[test_name] = TestResult(result)
return root
class TestrunResult:
    '''Results of one whole test run; (de)serialized to a JSON file.'''

    def __init__(self):
        # Only these attributes are written to / read from the results
        # JSON file; anything else on the instance is transient.
        self.serialized_keys = [
            'options',
            'name',
            'tests',
            'wglinfo',
            'glxinfo',
            'lspci',
            'time_elapsed',
        ]
        self.name = None
        self.glxinfo = None
        self.lspci = None
        self.tests = {}

    def __repairFile(self, file):
        '''
        Repair JSON file if necessary.

        If the JSON file is not closed properly, perhaps due a system
        crash during a test run, then the JSON is repaired by
        discarding the trailing, incomplete item and appending braces
        to the file to close the JSON object.

        The repair is performed on a string buffer, and the given file
        is never written to. This allows the file to be safely read
        during a test run.

        :return: If no repair occurred, then ``file`` is returned.
            Otherwise, a new file object containing the repaired JSON
            is returned.
        '''
        saved_position = file.tell()
        lines = file.readlines()
        file.seek(saved_position)

        if lines[-1] == '}':
            # JSON object was closed properly. No repair is necessary.
            return file

        # JSON object was not closed properly.
        #
        # To repair the file, we execute these steps:
        #   1. Find the closing brace of the last, properly written
        #      test result.
        #   2. Discard all subsequent lines.
        #   3. Remove the trailing comma of that test result.
        #   4. Append enough closing braces to close the json object.
        #   5. Return a file object containing the repaired JSON.

        # Each non-terminal test result ends with this line:
        safe_line = 3 * JSONWriter.INDENT * ' ' + '},\n'

        # Search for the last occurrence of safe_line.
        safe_line_num = None
        for i in range(-1, - len(lines), -1):
            if lines[i] == safe_line:
                safe_line_num = i
                break

        if safe_line_num is None:
            raise Exception('failed to repair corrupt result file: ' + file.name)

        # Remove corrupt lines.
        lines = lines[0:(safe_line_num + 1)]

        # Remove trailing comma.
        lines[-1] = 3 * JSONWriter.INDENT * ' ' + '}\n'

        # Close json object.
        lines.append(JSONWriter.INDENT * ' ' + '}\n')
        lines.append('}')

        # Return new file object containing the repaired JSON.
        new_file = StringIO()
        new_file.writelines(lines)
        new_file.flush()
        new_file.seek(0)
        return new_file

    def write(self, file):
        '''Dump the serializable attributes of this run to ``file`` as JSON.'''
        # Serialize only the keys in serialized_keys.
        keys = set(self.__dict__.keys()).intersection(self.serialized_keys)
        raw_dict = dict([(k, self.__dict__[k]) for k in keys])
        json.dump(raw_dict, file, indent=JSONWriter.INDENT)

    def parseFile(self, file):
        '''Load this run's attributes from ``file``, repairing it if cut short.'''
        file = self.__repairFile(file)
        raw_dict = json.load(file)

        # Check that only expected keys were unserialized.
        for key in raw_dict:
            if key not in self.serialized_keys:
                raise Exception('unexpected key in results file: ' + str(key))

        self.__dict__.update(raw_dict)

        # Replace each raw dict in self.tests with a TestResult.
        for (path, result) in self.tests.items():
            self.tests[path] = TestResult(result)
# NOTE(review): the lines below are the second (post-conversion) copy of the
# TestrunResult class body as rendered by the commit diff; leading
# indentation was lost when the page was captured. Kept byte-identical
# apart from docstring typo fixes.
def __init__(self):
self.serialized_keys = [
'options',
'name',
'tests',
'wglinfo',
'glxinfo',
'lspci',
'time_elapsed',
]
self.name = None
self.glxinfo = None
self.lspci = None
self.tests = {}
def __repairFile(self, file):
'''
Repair JSON file if necessary
If the JSON file is not closed properly, perhaps due a system
crash during a test run, then the JSON is repaired by
discarding the trailing, incomplete item and appending braces
to the file to close the JSON object.
The repair is performed on a string buffer, and the given file
is never written to. This allows the file to be safely read
during a test run.
:return: If no repair occurred, then ``file`` is returned.
Otherwise, a new file object containing the repaired JSON
is returned.
'''
saved_position = file.tell()
lines = file.readlines()
file.seek(saved_position)
if lines[-1] == '}':
# JSON object was closed properly. No repair is
# necessary.
return file
# JSON object was not closed properly.
#
# To repair the file, we execute these steps:
# 1. Find the closing brace of the last, properly written
# test result.
# 2. Discard all subsequent lines.
# 3. Remove the trailing comma of that test result.
# 4. Append enough closing braces to close the json object.
# 5. Return a file object containing the repaired JSON.
# Each non-terminal test result ends with this line:
safe_line = 3 * JSONWriter.INDENT * ' ' + '},\n'
# Search for the last occurrence of safe_line.
safe_line_num = None
for i in range(-1, - len(lines), -1):
if lines[i] == safe_line:
safe_line_num = i
break
if safe_line_num is None:
raise Exception('failed to repair corrupt result file: ' + file.name)
# Remove corrupt lines.
lines = lines[0:(safe_line_num + 1)]
# Remove trailing comma.
lines[-1] = 3 * JSONWriter.INDENT * ' ' + '}\n'
# Close json object.
lines.append(JSONWriter.INDENT * ' ' + '}\n')
lines.append('}')
# Return new file object containing the repaired JSON.
new_file = StringIO()
new_file.writelines(lines)
new_file.flush()
new_file.seek(0)
return new_file
def write(self, file):
# Serialize only the keys in serialized_keys.
keys = set(self.__dict__.keys()).intersection(self.serialized_keys)
raw_dict = dict([(k, self.__dict__[k]) for k in keys])
json.dump(raw_dict, file, indent=JSONWriter.INDENT)
def parseFile(self, file):
file = self.__repairFile(file)
raw_dict = json.load(file)
# Check that only expected keys were unserialized.
for key in raw_dict:
if key not in self.serialized_keys:
raise Exception('unexpected key in results file: ' + str(key))
self.__dict__.update(raw_dict)
# Replace each raw dict in self.tests with a TestResult.
for (path, result) in self.tests.items():
self.tests[path] = TestResult(result)
#############################################################################
##### Generic Test classes
#############################################################################
class Environment:
    '''Run-wide options plus helpers for collecting system info.'''

    def __init__(self, concurrent=True, execute=True, include_filter=None,
                 exclude_filter=None, valgrind=False):
        # None-sentinel instead of mutable [] defaults (the old defaults
        # were never mutated, so behavior is unchanged; passing [] or
        # omitting the argument works exactly as before).
        self.concurrent = concurrent
        self.execute = execute
        self.filter = []
        self.exclude_filter = []
        self.exclude_tests = set()
        self.valgrind = valgrind

        # The filter lists that are read in are lists of strings, but
        # the filters need to be compiled regex objects; rebuild them
        # with re.compile into self.filter / self.exclude_filter.
        for each in (include_filter or []):
            self.filter.append(re.compile(each))
        for each in (exclude_filter or []):
            self.exclude_filter.append(re.compile(each))

    def run(self, command):
        '''
        Run ``command`` and return its combined stderr+stdout as text.

        On any failure to launch the command, a best-effort error
        string is returned instead of raising.
        '''
        try:
            p = subprocess.Popen(
                command,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                universal_newlines=True
            )
            (stdout, stderr) = p.communicate()
        except Exception:
            # Was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit; narrow it to Exception.
            return "Failed to run " + command
        return stderr + stdout

    def collectData(self):
        '''Gather GL/system info (wglinfo or glxinfo, plus lspci on Linux).'''
        result = {}
        system = platform.system()
        if (system == 'Windows' or
                system.find("CYGWIN_NT") == 0):
            result['wglinfo'] = self.run('wglinfo')
        else:
            result['glxinfo'] = self.run('glxinfo')
        if system == 'Linux':
            result['lspci'] = self.run('lspci')
        return result
# NOTE(review): the lines below are the second (post-conversion) copy of the
# Environment class body as rendered by the commit diff; leading indentation
# was lost when the page was captured. Kept byte-identical (including the
# bare-string block used as a comment and the bare ``except:``).
def __init__(self, concurrent=True, execute=True, include_filter=[],
exclude_filter=[], valgrind=False):
self.concurrent = concurrent
self.execute = execute
self.filter = []
self.exclude_filter = []
self.exclude_tests = set()
self.valgrind = valgrind
"""
The filter lists that are read in should be a list of string objects,
however, the filters need to be a list or regex object.
This code uses re.compile to rebuild the lists and set self.filter
"""
for each in include_filter:
self.filter.append(re.compile(each))
for each in exclude_filter:
self.exclude_filter.append(re.compile(each))
def run(self, command):
try:
p = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True
)
(stdout, stderr) = p.communicate()
except:
return "Failed to run " + command
return stderr+stdout
def collectData(self):
result = {}
system = platform.system()
if (system == 'Windows' or
system.find("CYGWIN_NT") == 0):
result['wglinfo'] = self.run('wglinfo')
else:
result['glxinfo'] = self.run('glxinfo')
if system == 'Linux':
result['lspci'] = self.run('lspci')
return result
class Test:
ignoreErrors = []
def __init__(self, runConcurrent = False):
'''
'runConcurrent' controls whether this test will
execute it's work (i.e. __doRunWork) on the calling thread
(i.e. the main thread) or from the ConcurrentTestPool threads.
'''
self.runConcurrent = runConcurrent
self.skip_test = False
def run(self):
raise NotImplementedError