Commit b9c4f967 authored by Dylan Baker, committed by Eric Anholt

python: Convert tabs to spaces

PEP 8 specifies that each indent level should be either 4 spaces or an
equivalent tab indent, but never a mixture of tabs and spaces, with a
preference for spaces over tabs because their width is unambiguous.
Mixing tabs and spaces leads to unexpected indent level errors.

Piglit currently uses a mixture of spaces and tabs; this patch uses the
Python tool reindent.py to convert the tabs to spaces.
Signed-off-by: Dylan Baker <baker.dylan.c@gmail.com>
v2: Rebase against master, fix apparent unintentional unindentation of
    help text in piglit-summary.py (changes by anholt)
Reviewed-by: Eric Anholt <eric@anholt.net>
parent 80071932
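For illustration only: a minimal sketch of the kind of whitespace rewrite reindent.py performs. The helper name retab and the in-place rewrite are assumptions made for the sketch; the patch itself was generated with the stock Tools/scripts/reindent.py, which additionally normalizes each indent level to 4 spaces and strips trailing whitespace.

import sys

def retab(path, tabsize=8):
    # Hypothetical helper (not from this patch): expand tabs in leading
    # whitespace only, so tab characters inside string literals are left
    # alone.  reindent.py goes further and also collapses each indent
    # level to 4 spaces and trims trailing whitespace.
    out = []
    with open(path) as f:
        for line in f:
            body = line.lstrip(' \t')
            prefix = line[:len(line) - len(body)]
            out.append(prefix.expandtabs(tabsize) + body)
    with open(path, 'w') as f:
        f.writelines(out)

if __name__ == '__main__':
    for name in sys.argv[1:]:
        retab(name)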
@@ -30,26 +30,26 @@ from exectest import ExecTest

##### GleanTest: Execute a sub-test of Glean
#############################################################################

def gleanExecutable():
    return testBinDir + 'glean'

class GleanTest(ExecTest):
    globalParams = []

    def __init__(self, name):
        ExecTest.__init__(self, \
                [gleanExecutable(),
                 "-o",
                 "-v", "-v", "-v",
                 "-t", "+"+name])
        self.name = name

    def run(self, valgrind):
        self.command += GleanTest.globalParams
        return ExecTest.run(self, valgrind)

    def interpretResult(self, out, returncode, results):
        if out.find('FAIL') >= 0:
            results['result'] = 'fail'
        else:
            results['result'] = 'pass'
        return out
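A hedged usage sketch of the class above (assuming it is run from a piglit checkout so that framework.gleantest is importable; the test name 'basic' and the sample glean output string are illustrative, not taken from this diff):

from framework.gleantest import GleanTest

test = GleanTest('basic')
results = {}
out = test.interpretResult('basic: PASS', 0, results)   # no 'FAIL' in the output
assert results['result'] == 'pass'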
@@ -362,4 +362,3 @@ class Main:
        suite = self.create_suite()
        self.run_suite(suite)
@@ -26,24 +26,24 @@ from patterns import Singleton
import logging

class Logger(Singleton):
    @synchronized_self
    def __logMessage(self, logfunc, message, **kwargs):
        [logfunc(line, **kwargs) for line in message.split('\n')]

    @synchronized_self
    def getLogger(self, channel = None):
        if 0 == len(logging.root.handlers):
            logging.basicConfig(
                format = "[%(asctime)s] :: %(message)+8s :: %(name)s",
                datefmt = "%c",
                level = logging.INFO,
            )
        if channel is None:
            channel = "base"
        logger = logging.getLogger(channel)
        return logger

    def log(self, type = logging.INFO, msg = "", channel = None):
        self.__logMessage(lambda m, **kwargs: self.getLogger(channel).log(type, m, **kwargs), msg)

log = Logger().log
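A hedged usage sketch of the module-level log helper (the import path framework.log and the channel name "tests" are assumptions):

import logging
from framework.log import log

# Each line of a multi-line message is logged separately on the named channel.
log(msg="first line\nsecond line", channel="tests")
log(type=logging.WARNING, msg="something looks off", channel="tests")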
@@ -24,64 +24,64 @@
import threading

class Singleton(object):
    '''
    Modeled after http://www.python.org/download/releases/2.2.3/descrintro/*__new__

    A thread-safe (mostly -- see NOTE) Singleton class pattern.

    NOTE: deleting a singleton instance (i.e. Singleton::delInstance) does not guarantee that something
    else is currently using it. To reduce this risk, a program should not hold a reference to the
    instance. Rather, use the create/construct syntax (see example below) to access the instance. Yet,
    this still does not guarantee that this type of usage will result in a desired effect in a
    multithreaded program.
    You've been warned so use the singleton pattern wisely!

    Example:

    class MySingletonClass(Singleton):
        def init(self):
            print "in MySingletonClass::init()", self

        def foo(self):
            print "in MySingletonClass::foo()", self

    MySingletonClass().foo()
    MySingletonClass().foo()
    MySingletonClass().foo()

    ---> output will look something like this:
    in MySingletonClass::init() <__main__.MySingletonClass object at 0x7ff5b322f3d0>
    in MySingletonClass::foo() <__main__.MySingletonClass object at 0x7ff5b322f3d0>
    in MySingletonClass::foo() <__main__.MySingletonClass object at 0x7ff5b322f3d0>
    in MySingletonClass::foo() <__main__.MySingletonClass object at 0x7ff5b322f3d0>
    '''

    lock = threading.RLock()

    def __new__(cls, *args, **kwargs):
        try:
            cls.lock.acquire()
            it = cls.__dict__.get('__it__')
            if it is not None:
                return it
            cls.__it__ = it = object.__new__(cls)
            it.init(*args, **kwargs)
            return it
        finally: # this always gets called, even when returning from within the try block
            cls.lock.release()

    def init(self, *args, **kwargs):
        '''
        Derived classes should override this method to do its initializations
        The derived class should not implement a '__init__' method.
        '''
        pass

    @classmethod
    def delInstance(cls):
        cls.lock.acquire()
        try:
            if cls.__dict__.get('__it__') is not None:
                del cls.__it__
        finally:
            cls.lock.release()
@@ -28,32 +28,32 @@ from weakref import WeakKeyDictionary
import multiprocessing

def synchronized_self(function):
    '''
    A decorator function for providing multithreaded, synchronized access
    amongst one or more functions within a class instance.
    '''
    def wrapper(self, *args, **kwargs):
        synchronized_self.locks.setdefault(self, RLock()).acquire()
        try:
            return function(self, *args, **kwargs)
        finally:
            synchronized_self.locks[self].release()
    return wrapper

synchronized_self.locks = WeakKeyDictionary() # track the locks for each instance

class ConcurrentTestPool(Singleton):
    @synchronized_self
    def init(self):
        self.pool = ThreadPool(multiprocessing.cpu_count())

    @synchronized_self
    def put(self, callable_, args = None, kwds = None):
        self.pool.putRequest(
            WorkRequest(
                callable_, args = args, kwds = kwds
            )
        )

    def join(self):
        self.pool.wait()
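A hedged usage sketch of the pool (the import path framework.threads and the work function are assumptions, not part of this diff):

from framework.threads import ConcurrentTestPool

def work(n):
    print n * n            # placeholder work item

pool = ConcurrentTestPool()    # Singleton: repeated calls return the same pool
for i in range(4):
    pool.put(work, args=(i,))
pool.join()                    # block until all queued work has finished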
@@ -32,46 +32,46 @@ import re
import sys

def usage():
    USAGE = """\
Usage %(progName) [cppfile] [add_prefix]

cppfile: path to glean cppfile to parse
add_suffix: prefix to have in test name i.e. glsl1 -> add_glsl1
"""
    print USAGE % {'progName':sys.argv[0]}
    sys.exit(1)

def main():
    try:
        options, args = getopt(sys.argv[1:], "hdt:n:x:", [ "help", "dry-run", "tests=", "name=", "exclude-tests=" ])
    except GetoptError:
        usage()

    if len(args) != 2:
        usage()

    suffix = args[1]
    fileIN = open(args[0], 'r')
    line = fileIN.readline()
    next_is_name = False

    while line:
        if next_is_name:
            name = line.lstrip(" \",")
            name = name.rstrip("\n")
            if re.match(r'GLint stat', name):
                break
            if not re.match(r'//', name):
                name = re.sub(r'".*',
                              r'',
                              name)
                print "add_" + suffix + "('" + name + "')"
            next_is_name = False
        if line == " {\n":
            next_is_name = True
        line = fileIN.readline()

if __name__ == "__main__":
    main()
@@ -202,28 +202,28 @@ def translate_category(category_name):
# which names are synonymous with which other names.
class SynonymMap(object):
    def __init__(self):
        # __name_to_synonyms maps from a function name to the set of
        # all names that are synonymous with it (including itself).
        self.__name_to_synonyms = {}

    # Add a single function name which is not (yet) known to be
    # synonymous with any other name. No effect if the function name
    # is already known.
    def add_singleton(self, name):
        if name not in self.__name_to_synonyms:
            self.__name_to_synonyms[name] = frozenset([name])
        return self.__name_to_synonyms[name]

    # Add a pair of function names, and note that they are synonymous.
    # Synonymity is transitive, so if either of the two function names
    # previously had known synonyms, all synonyms are combined into a
    # single set.
    def add_alias(self, name, alias):
        name_ss = self.add_singleton(name)
        alias_ss = self.add_singleton(alias)
        combined_set = name_ss | alias_ss
        for n in combined_set:
            self.__name_to_synonyms[n] = combined_set

    # Get a set of sets of synonymous functions.
    def get_synonym_sets(self):
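The hunk above is truncated at the get_synonym_sets definition. As a hedged usage sketch of the class (the OES alias is hypothetical; per the comment above, get_synonym_sets() returns the set of synonym sets):

sm = SynonymMap()
sm.add_alias('BindBuffer', 'BindBufferARB')
sm.add_alias('BindBufferARB', 'BindBufferOES')   # hypothetical third alias
# Aliasing is transitive, so all three names now share one synonym set.
sets = sm.get_synonym_sets()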
@@ -233,56 +233,56 @@ class SynonymMap(object):

# In-memory representation of the GL API.
class Api(object):
    def __init__(self):
        # Api.type_translation is a dict mapping abstract type names
        # to C types.  It is based on the data in the gl.tm file.  For
        # example, the dict entry for String is:
        #
        # 'String': 'const GLubyte *'
        self.type_translation = {}

        # Api.enums is a dict mapping enum names (without the 'GL_'
        # prefix) to a dict containing (a) the enum value expressed as
        # an integer, and (b) the enum value expressed as a C literal.
        # It is based on the data in the gl.spec file.  For example,
        # the dict entry for GL_CLIENT_ALL_ATTRIB_BITS is:
        #
        # 'CLIENT_ALL_ATTRIB_BITS': { 'value_int': 4294967295,
        #                             'value_str': "0xFFFFFFFF" }
        self.enums = {}

        # Api.functions is a dict mapping function names (without the
        # 'gl' prefix) to a dict containing (a) the name of the
        # category the function is in, (b) the function call parameter
        # names, (c) the function call parameter types, and (d) the
        # function return type.  It is based on the data in the
        # gl.spec file, cross-referenced against the type translations
        # from the gl.tm file.  For example, the dict entry for
        # glAreTexturesResident is:
        #
        # 'AreTexturesResident': {
        #     'category': '1.1',
        #     'param_names': ['n', 'textures', 'residences'],
        #     'param_types': ['GLsizei', 'const GLuint *', 'GLboolean *'],
        #     'return_type': ['GLboolean'] }
        self.functions = {}

        # Api.synonyms is a SynonymMap object which records which
        # function names are aliases of each other.  It is based on
        # the "alias" declarations from the gl.spec file.
        self.synonyms = SynonymMap()

        # Api.categories is a dict mapping category names to a dict
        # describing the category.  For categories representing a GL
        # version, the dict entry looks like this:
        #
        # '2.1': { 'kind': 'GL', 'gl_10x_version': 21 }
        #
        # For categories representing an extension, the dict entry
        # looks like this:
        #
        # 'GL_ARB_sync': { 'kind': 'extension',
        #                  'extension_name': 'GL_ARB_sync' }
        self.categories = {}

    # Convert each line in the gl.tm file into a key/value pair in
    # self.type_translation, mapping an abstract type name to a C
...@@ -391,11 +391,11 @@ class Api(object): ...@@ -391,11 +391,11 @@ class Api(object):
'Function {0!r} parameter {1!r} uses unrecognized ' 'Function {0!r} parameter {1!r} uses unrecognized '
'direction {2!r}'.format( 'direction {2!r}'.format(
name, param_name, param_dir)) name, param_name, param_dir))
else: else:
raise Exception( raise Exception(
'Function {0!r} parameter {1!r} uses unrecognized ' 'Function {0!r} parameter {1!r} uses unrecognized '
'multiplicity {2!r}'.format( 'multiplicity {2!r}'.format(
name, param_name, param_multiplicity)) name, param_name, param_multiplicity))
param_types[param_index] = param_type param_types[param_index] = param_type
if len(attributes['return']) != 1: if len(attributes['return']) != 1:
raise Exception( raise Exception(
...@@ -413,7 +413,7 @@ class Api(object): ...@@ -413,7 +413,7 @@ class Api(object):
'return_type': self.type_translation[attributes['return'][0]], 'return_type': self.type_translation[attributes['return'][0]],
'param_names': param_names, 'param_names': param_names,
'param_types': param_types, 'param_types': param_types,
'category': category, 'category': category,
} }
self.synonyms.add_singleton(name) self.synonyms.add_singleton(name)
for alias in attributes['alias']: for alias in attributes['alias']:
...@@ -466,4 +466,4 @@ if __name__ == '__main__': ...@@ -466,4 +466,4 @@ if __name__ == '__main__':
with open(sys.argv[3]) as f: with open(sys.argv[3]) as f:
api.read_enumext_spec(f) api.read_enumext_spec(f)
with open(sys.argv[4], 'w') as f: with open(sys.argv[4], 'w') as f:
f.write(api.to_json()) f.write(api.to_json())
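A hedged sketch of how a downstream generator might consume the tables documented in Api.__init__ above (the helper name c_prototype is an assumption, and the api object is assumed to have been populated by the read_*_spec methods):

def c_prototype(api, name):
    # Build a C prototype string from the documented dict layout.
    fn = api.functions[name]
    ret = fn['return_type']
    if isinstance(ret, list):      # the comment above shows a one-element list,
        ret = ret[0]               # while the parsing code stores a bare string
    params = ', '.join('%s %s' % (t, p)
                       for t, p in zip(fn['param_types'], fn['param_names']))
    return '%s gl%s(%s);' % (ret, name, params)

# For the documented 'AreTexturesResident' entry this would yield something like:
# GLboolean glAreTexturesResident(GLsizei n, const GLuint * textures, GLboolean * residences);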
@@ -34,23 +34,23 @@ import framework.core as core

##### Main program
#############################################################################

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("results",
                        metavar = "<First Results File>",
                        nargs = "*",
                        help = "Space seperated list of results files")
    args = parser.parse_args()

    combined = core.loadTestResults(args.results.pop(0))

    for resultsDir in args.results:
        results = core.loadTestResults(resultsDir)
        for testname, result in results.tests.items():
            combined.tests[testname] = result

    combined.write(sys.stdout)

if __name__ == "__main__":
    main()
@@ -39,62 +39,62 @@ from framework.gleantest import GleanTest
#############################################################################

def main():
    parser = argparse.ArgumentParser(sys.argv)
    parser.add_argument("-t", "--include-tests",
                        default = [],
                        action = "append",
                        metavar = "<regex>",
                        help = "Run only matching tests (can be used more than once)")
    parser.add_argument("--tests",
                        default = [],
                        action = "append",
                        metavar = "<regex>",
                        help = "Run only matching tests (can be used more than once)" \
                               "Deprecated")
    parser.add_argument("-x", "--exclude-tests",
                        default = [],
                        action = "append",
                        metavar = "<regex>",
                        help = "Exclude matching tests (can be used more than once)")
    parser.add_argument("testProfile",
                        metavar = "<Path to testfile>",
                        help = "Path to results folder")
    args = parser.parse_args()

    # Deprecated
    # --include-tests is the standard going forward, but for backwards
    # compatability merge args.tests into args.include_tests and drop
    # duplicates
    if args.tests != []:
        print "Warnings: Option --tests is deprecated, use --include-tests"
        args.include_tests = list(set(args.include_tests + args.tests))

    # Set the environment, pass in the included and excluded tests
    env = core.Environment(
        exclude_filter=args.exclude_tests,
        include_filter=args.include_tests,
    )

    # Change to the piglit's path
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    profile = core.loadTestProfile(args.testProfile)

    def getCommand(test):
        command = ''
        if isinstance(test, GleanTest):
            for var, val in test.env.items():
                command += var + "='" + val + "' "
        command += ' '.join(test.command)
        return command

    profile.prepare_test_list(env)
    for name, test in profile.test_list.items():
        assert(isinstance(test, ExecTest))
        print name, ':::', getCommand(test)

if __name__ == "__main__":
    main()
@@ -36,98 +36,98 @@ from framework import junit

class Writer: