Commit b9c4f967 authored by Dylan Baker, committed by Eric Anholt

python: Convert tabs to spaces

PEP 8 specifies that all indents should be either 4 spaces or an
equivalent tab indent, but without mixing tabs and spaces, with a
preference for spaces over tabs because their width is unambiguous.
Tabs and spaces should not be mixed because doing so leads to
unexpected indent-level errors.

Currently piglit uses a mixture of spaces and tabs, this patch uses the
python tools reindent.py to convert the tabs to spaces.
Signed-off-by: Dylan Baker <baker.dylan.c@gmail.com>
v2: Rebase against master, fix apparent unintentional unindentation of
    help text in piglit-summary.py (changes by anholt)
Reviewed-by: Eric Anholt <eric@anholt.net>
parent 80071932
This diff is collapsed.
This diff is collapsed.
......@@ -30,26 +30,26 @@ from exectest import ExecTest
##### GleanTest: Execute a sub-test of Glean
#############################################################################
def gleanExecutable():
    '''Return the full path of the glean binary inside the test bin dir.'''
    return testBinDir + 'glean'
class GleanTest(ExecTest):
    '''An ExecTest that runs a single glean sub-test.'''

    # Extra command-line parameters appended to every glean invocation.
    globalParams = []

    def __init__(self, name):
        '''Build the glean command line for sub-test *name*.'''
        command = [gleanExecutable(),
                   "-o",
                   "-v", "-v", "-v",
                   "-t", "+" + name]
        ExecTest.__init__(self, command)
        self.name = name

    def run(self, valgrind):
        '''Append the class-wide parameters, then run as a plain ExecTest.'''
        self.command += GleanTest.globalParams
        return ExecTest.run(self, valgrind)

    def interpretResult(self, out, returncode, results):
        '''Mark the test failed when glean's output mentions FAIL.

        Note: the exit code is intentionally ignored; glean reports
        failure through its textual output.
        '''
        results['result'] = 'fail' if 'FAIL' in out else 'pass'
        return out
This diff is collapsed.
......@@ -362,4 +362,3 @@ class Main:
suite = self.create_suite()
self.run_suite(suite)
......@@ -26,24 +26,24 @@ from patterns import Singleton
import logging
class Logger(Singleton):
    '''Thread-safe, process-wide logging facade built on the stdlib
    logging module.  Access it through the module-level ``log`` callable.'''

    @synchronized_self
    def __logMessage(self, logfunc, message, **kwargs):
        '''Feed each line of *message* to *logfunc* separately so that
        multi-line messages stay aligned in the output.'''
        # Fixed: the original used a list comprehension purely for its
        # side effects, building and discarding a throwaway list.
        for line in message.split('\n'):
            logfunc(line, **kwargs)

    @synchronized_self
    def getLogger(self, channel = None):
        '''Return the logging.Logger for *channel* (default "base"),
        lazily installing a basicConfig handler on first use.'''
        if 0 == len(logging.root.handlers):
            logging.basicConfig(
                format = "[%(asctime)s] :: %(message)+8s :: %(name)s",
                datefmt = "%c",
                level = logging.INFO,
            )
        if channel is None:
            channel = "base"
        logger = logging.getLogger(channel)
        return logger

    def log(self, type = logging.INFO, msg = "", channel = None):
        '''Log *msg* at level *type* on *channel*.  ("type" shadows the
        builtin, but is kept for keyword-argument compatibility.)'''
        self.__logMessage(lambda m, **kwargs: self.getLogger(channel).log(type, m, **kwargs), msg)

log = Logger().log
......@@ -24,64 +24,64 @@
import threading
class Singleton(object):
    '''
    A thread-safe (mostly -- see NOTE) Singleton class pattern, modeled
    after http://www.python.org/download/releases/2.2.3/descrintro/*__new__

    NOTE: deleting a singleton instance (i.e. Singleton::delInstance) does
    not guarantee that nothing else is currently using it.  To reduce this
    risk, a program should not hold a reference to the instance; rather,
    use the create/construct syntax (see example below) to access the
    instance.  Even then, a multithreaded program gets no hard guarantee.
    You've been warned, so use the singleton pattern wisely!

    Example:

        class MySingletonClass(Singleton):
            def init(self):
                print("in MySingletonClass::init()", self)

            def foo(self):
                print("in MySingletonClass::foo()", self)

        MySingletonClass().foo()
        MySingletonClass().foo()
        MySingletonClass().foo()

    ---> init() runs once; all three foo() calls see the same object.
    '''

    # Class-wide lock guarding creation/deletion of the shared instance.
    lock = threading.RLock()

    def __new__(cls, *args, **kwargs):
        '''Return the one shared instance, creating it on first call.'''
        # Fixed idiom: 'with' replaces the manual acquire()/try/finally
        # release() dance; the lock is released even on early return.
        with cls.lock:
            it = cls.__dict__.get('__it__')
            if it is not None:
                return it
            cls.__it__ = it = object.__new__(cls)
            it.init(*args, **kwargs)
            return it

    def init(self, *args, **kwargs):
        '''
        Derived classes should override this method to do their
        initializations.  The derived class should not implement a
        '__init__' method.
        '''
        pass

    @classmethod
    def delInstance(cls):
        '''Drop the shared instance so the next call creates a fresh one.'''
        with cls.lock:
            if cls.__dict__.get('__it__') is not None:
                del cls.__it__
This diff is collapsed.
......@@ -28,32 +28,32 @@ from weakref import WeakKeyDictionary
import multiprocessing
def synchronized_self(function):
    '''
    A decorator function for providing multithreaded, synchronized access
    amongst one or more functions within a class instance.  All methods of
    an instance decorated with this share a single per-instance RLock.
    '''
    def wrapper(self, *args, **kwargs):
        # Fixed idiom: 'with' replaces acquire()/try/finally release().
        # setdefault returns the existing lock when one is already
        # registered for this instance, so all decorated methods of the
        # same object serialize on the same RLock.
        with synchronized_self.locks.setdefault(self, RLock()):
            return function(self, *args, **kwargs)
    return wrapper

synchronized_self.locks = WeakKeyDictionary()  # track the locks for each instance
class ConcurrentTestPool(Singleton):
    '''Singleton wrapper around a ThreadPool sized to the CPU count.'''

    @synchronized_self
    def init(self):
        # One worker thread per CPU core.
        self.pool = ThreadPool(multiprocessing.cpu_count())

    @synchronized_self
    def put(self, callable_, args = None, kwds = None):
        '''Queue *callable_* for execution on the pool.'''
        request = WorkRequest(callable_, args = args, kwds = kwds)
        self.pool.putRequest(request)

    def join(self):
        '''Block until every queued work request has completed.'''
        self.pool.wait()
......@@ -32,46 +32,46 @@ import re
import sys
def usage():
USAGE = """\
USAGE = """\
Usage %(progName) [cppfile] [add_prefix]
cppfile: path to glean cppfile to parse
add_suffix: prefix to have in test name i.e. glsl1 -> add_glsl1
"""
print USAGE % {'progName':sys.argv[0]}
sys.exit(1)
print USAGE % {'progName':sys.argv[0]}
sys.exit(1)
def main():
try:
options, args = getopt(sys.argv[1:], "hdt:n:x:", [ "help", "dry-run", "tests=", "name=", "exclude-tests=" ])
except GetoptError:
usage()
try:
options, args = getopt(sys.argv[1:], "hdt:n:x:", [ "help", "dry-run", "tests=", "name=", "exclude-tests=" ])
except GetoptError:
usage()
if len(args) != 2:
usage()
if len(args) != 2:
usage()
suffix = args[1]
suffix = args[1]
fileIN = open(args[0], 'r')
line = fileIN.readline()
next_is_name = False
fileIN = open(args[0], 'r')
line = fileIN.readline()
next_is_name = False
while line:
if next_is_name:
name = line.lstrip(" \",")
name = name.rstrip("\n")
if re.match(r'GLint stat', name):
break
if not re.match(r'//', name):
name = re.sub(r'".*',
r'',
name)
print "add_" + suffix + "('" + name + "')"
next_is_name = False
if line == " {\n":
next_is_name = True
line = fileIN.readline()
while line:
if next_is_name:
name = line.lstrip(" \",")
name = name.rstrip("\n")
if re.match(r'GLint stat', name):
break
if not re.match(r'//', name):
name = re.sub(r'".*',
r'',
name)
print "add_" + suffix + "('" + name + "')"
next_is_name = False
if line == " {\n":
next_is_name = True
line = fileIN.readline()
if __name__ == "__main__":
main()
main()
......@@ -202,28 +202,28 @@ def translate_category(category_name):
# which names are synonymous with which other names.
class SynonymMap(object):
def __init__(self):
# __name_to_synonyms maps from a function name to the set of
# all names that are synonymous with it (including itself).
self.__name_to_synonyms = {}
# __name_to_synonyms maps from a function name to the set of
# all names that are synonymous with it (including itself).
self.__name_to_synonyms = {}
# Add a single function name which is not (yet) known to be
# synonymous with any other name. No effect if the function name
# is already known.
def add_singleton(self, name):
if name not in self.__name_to_synonyms:
self.__name_to_synonyms[name] = frozenset([name])
return self.__name_to_synonyms[name]
if name not in self.__name_to_synonyms:
self.__name_to_synonyms[name] = frozenset([name])
return self.__name_to_synonyms[name]
# Add a pair of function names, and note that they are synonymous.
# Synonymity is transitive, so if either of the two function names
# previously had known synonyms, all synonyms are combined into a
# single set.
def add_alias(self, name, alias):
name_ss = self.add_singleton(name)
alias_ss = self.add_singleton(alias)
combined_set = name_ss | alias_ss
for n in combined_set:
self.__name_to_synonyms[n] = combined_set
name_ss = self.add_singleton(name)
alias_ss = self.add_singleton(alias)
combined_set = name_ss | alias_ss
for n in combined_set:
self.__name_to_synonyms[n] = combined_set
# Get a set of sets of synonymous functions.
def get_synonym_sets(self):
......@@ -233,56 +233,56 @@ class SynonymMap(object):
# In-memory representation of the GL API.
class Api(object):
def __init__(self):
# Api.type_translation is a dict mapping abstract type names
# to C types. It is based on the data in the gl.tm file. For
# example, the dict entry for String is:
#
# 'String': 'const GLubyte *'
# Api.type_translation is a dict mapping abstract type names
# to C types. It is based on the data in the gl.tm file. For
# example, the dict entry for String is:
#
# 'String': 'const GLubyte *'
self.type_translation = {}
# Api.enums is a dict mapping enum names (without the 'GL_'
# prefix) to a dict containing (a) the enum value expressed as
# an integer, and (b) the enum value expressed as a C literal.
# It is based on the data in the gl.spec file. For example,
# the dict entry for GL_CLIENT_ALL_ATTRIB_BITS is:
#
# 'CLIENT_ALL_ATTRIB_BITS': { 'value_int': 4294967295,
# 'value_str': "0xFFFFFFFF" }
# Api.enums is a dict mapping enum names (without the 'GL_'
# prefix) to a dict containing (a) the enum value expressed as
# an integer, and (b) the enum value expressed as a C literal.
# It is based on the data in the gl.spec file. For example,
# the dict entry for GL_CLIENT_ALL_ATTRIB_BITS is:
#
# 'CLIENT_ALL_ATTRIB_BITS': { 'value_int': 4294967295,
# 'value_str': "0xFFFFFFFF" }
self.enums = {}
# Api.functions is a dict mapping function names (without the
# 'gl' prefix) to a dict containing (a) the name of the
# category the function is in, (b) the function call parameter
# names, (c) the function call parameter types, and (d) the
# function return type. It is based on the data in the
# gl.spec file, cross-referenced against the type translations
# from the gl.tm file. For example, the dict entry for
# glAreTexturesResident is:
#
# 'AreTexturesResident': {
# 'category': '1.1',
# 'param_names': ['n', 'textures', 'residences'],
# 'param_types': ['GLsizei', 'const GLuint *', 'GLboolean *'],
# 'return_type': ['GLboolean'] }
# Api.functions is a dict mapping function names (without the
# 'gl' prefix) to a dict containing (a) the name of the
# category the function is in, (b) the function call parameter
# names, (c) the function call parameter types, and (d) the
# function return type. It is based on the data in the
# gl.spec file, cross-referenced against the type translations
# from the gl.tm file. For example, the dict entry for
# glAreTexturesResident is:
#
# 'AreTexturesResident': {
# 'category': '1.1',
# 'param_names': ['n', 'textures', 'residences'],
# 'param_types': ['GLsizei', 'const GLuint *', 'GLboolean *'],
# 'return_type': ['GLboolean'] }
self.functions = {}
# Api.synonyms is a SynonymMap object which records which
# function names are aliases of each other. It is based on
# the "alias" declarations from the gl.spec file.
# Api.synonyms is a SynonymMap object which records which
# function names are aliases of each other. It is based on
# the "alias" declarations from the gl.spec file.
self.synonyms = SynonymMap()
# Api.categories is a dict mapping category names to a dict
# describing the category. For categories representing a GL
# version, the dict entry looks like this:
#
# '2.1': { 'kind': 'GL', 'gl_10x_version': 21 }
#
# For categories representing an extension, the dict entry
# looks like this:
#
# 'GL_ARB_sync': { 'kind': 'extension',
# 'extension_name': 'GL_ARB_sync' }
self.categories = {}
# Api.categories is a dict mapping category names to a dict
# describing the category. For categories representing a GL
# version, the dict entry looks like this:
#
# '2.1': { 'kind': 'GL', 'gl_10x_version': 21 }
#
# For categories representing an extension, the dict entry
# looks like this:
#
# 'GL_ARB_sync': { 'kind': 'extension',
# 'extension_name': 'GL_ARB_sync' }
self.categories = {}
# Convert each line in the gl.tm file into a key/value pair in
# self.type_translation, mapping an abstract type name to a C
......@@ -391,11 +391,11 @@ class Api(object):
'Function {0!r} parameter {1!r} uses unrecognized '
'direction {2!r}'.format(
name, param_name, param_dir))
else:
raise Exception(
'Function {0!r} parameter {1!r} uses unrecognized '
'multiplicity {2!r}'.format(
name, param_name, param_multiplicity))
else:
raise Exception(
'Function {0!r} parameter {1!r} uses unrecognized '
'multiplicity {2!r}'.format(
name, param_name, param_multiplicity))
param_types[param_index] = param_type
if len(attributes['return']) != 1:
raise Exception(
......@@ -413,7 +413,7 @@ class Api(object):
'return_type': self.type_translation[attributes['return'][0]],
'param_names': param_names,
'param_types': param_types,
'category': category,
'category': category,
}
self.synonyms.add_singleton(name)
for alias in attributes['alias']:
......@@ -466,4 +466,4 @@ if __name__ == '__main__':
with open(sys.argv[3]) as f:
api.read_enumext_spec(f)
with open(sys.argv[4], 'w') as f:
f.write(api.to_json())
f.write(api.to_json())
......@@ -34,23 +34,23 @@ import framework.core as core
##### Main program
#############################################################################
def main():
    '''Merge several piglit result files and write the combined set of
    test results to stdout.  Later files win on duplicate test names.'''
    parser = argparse.ArgumentParser()
    parser.add_argument("results",
                        metavar = "<First Results File>",
                        nargs = "*",
                        help = "Space seperated list of results files")
    args = parser.parse_args()

    # Seed the combined results with the first file ...
    combined = core.loadTestResults(args.results.pop(0))

    # ... then overlay every remaining file on top of it.
    for results_dir in args.results:
        loaded = core.loadTestResults(results_dir)
        for test_name, outcome in loaded.tests.items():
            combined.tests[test_name] = outcome

    combined.write(sys.stdout)

if __name__ == "__main__":
    main()
......@@ -39,62 +39,62 @@ from framework.gleantest import GleanTest
#############################################################################
def main():
parser = argparse.ArgumentParser(sys.argv)
parser = argparse.ArgumentParser(sys.argv)
parser.add_argument("-t", "--include-tests",
default = [],
action = "append",
metavar = "<regex>",
help = "Run only matching tests (can be used more than once)")
parser.add_argument("--tests",
default = [],
action = "append",
metavar = "<regex>",
help = "Run only matching tests (can be used more than once)" \
"Deprecated")
parser.add_argument("-x", "--exclude-tests",
default = [],
action = "append",
metavar = "<regex>",
help = "Exclude matching tests (can be used more than once)")
parser.add_argument("testProfile",
metavar = "<Path to testfile>",
help = "Path to results folder")
parser.add_argument("-t", "--include-tests",
default = [],
action = "append",
metavar = "<regex>",
help = "Run only matching tests (can be used more than once)")
parser.add_argument("--tests",
default = [],
action = "append",
metavar = "<regex>",
help = "Run only matching tests (can be used more than once)" \
"Deprecated")
parser.add_argument("-x", "--exclude-tests",
default = [],
action = "append",
metavar = "<regex>",
help = "Exclude matching tests (can be used more than once)")
parser.add_argument("testProfile",
metavar = "<Path to testfile>",
help = "Path to results folder")
args = parser.parse_args()
args = parser.parse_args()
# Deprecated
# --include-tests is the standard going forward, but for backwards
# compatability merge args.tests into args.include_tests and drop
# duplicates
if args.tests != []:
print "Warnings: Option --tests is deprecated, use --include-tests"
args.include_tests = list(set(args.include_tests + args.tests))
# Set the environment, pass in the included and excluded tests
env = core.Environment(
exclude_filter=args.exclude_tests,
include_filter=args.include_tests,
)
# Deprecated
# --include-tests is the standard going forward, but for backwards
# compatability merge args.tests into args.include_tests and drop
# duplicates
if args.tests != []:
print "Warnings: Option --tests is deprecated, use --include-tests"
args.include_tests = list(set(args.include_tests + args.tests))
# Change to the piglit's path
piglit_dir = path.dirname(path.realpath(sys.argv[0]))
os.chdir(piglit_dir)
# Set the environment, pass in the included and excluded tests
env = core.Environment(
exclude_filter=args.exclude_tests,
include_filter=args.include_tests,
)
profile = core.loadTestProfile(args.testProfile)
# Change to the piglit's path
piglit_dir = path.dirname(path.realpath(sys.argv[0]))
os.chdir(piglit_dir)
def getCommand(test):
command = ''
if isinstance(test, GleanTest):
for var, val in test.env.items():
command += var + "='" + val + "' "
command += ' '.join(test.command)
return command
profile = core.loadTestProfile(args.testProfile)
profile.prepare_test_list(env)
for name, test in profile.test_list.items():
assert(isinstance(test, ExecTest))
print name, ':::', getCommand(test)
def getCommand(test):
command = ''
if isinstance(test, GleanTest):
for var, val in test.env.items():
command += var + "='" + val + "' "
command += ' '.join(test.command)
return command
profile.prepare_test_list(env)
for name, test in profile.test_list.items():
assert(isinstance(test, ExecTest))
print name, ':::', getCommand(test)
if __name__ == "__main__":
main()
main()
This diff is collapsed.
This diff is collapsed.
......@@ -36,98 +36,98 @@ from framework import junit
class Writer:
def __init__(self, filename):
self.report = junit.Report(filename)
self.path = []
def write(self, arg):
results = [framework.core.loadTestResults(arg)]
summary = framework.summary.Summary(results)
self.report.start()
self.report.startSuite('piglit')
try:
for test in summary.allTests():
self.write_test(summary, test)
finally:
self.enter_path([])
self.report.stopSuite()
self.report.stop()
def write_test(self, summary, test):
test_path = test.path.split('/')
test_name = test_path.pop()
self.enter_path(test_path)
assert len(summary.testruns) == 1
tr = summary.testruns[0]
result = test.results[0]
self.report.startCase(test_name)
duration = None
try:
try:
self.report.addStdout(result['command'] + '\n')
except KeyError:
pass
try:
self.report.addStderr(result['info'])
except KeyError:
pass
success = result.get('result')
if success in ('pass', 'warn'):