Commit d0f6d81a authored by Dylan Baker

framework: Add support for jsonstreams

This commit adds support in the json backend for using an external
library that I wrote called jsonstreams. It's a fairly self-explanatory
library, and it has several advantages for piglit. First, I've measured
a consistent 10-15 second speed-up when running the quick profile.
Second, it *vastly* reduces the amount of memory piglit needs to write
out the final JSON document.

This is not implemented as a separate backend because, the way that
piglit's backends are implemented, only one backend can handle a given
file extension. While this could be worked around by setting the
extension to something like '.junit.xml', that doesn't really make sense
here, since both code paths write the same format, just by different
means.
Signed-off-by: Dylan Baker <dylanx.c.baker@intel.com>
parent c3a6865f
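For context, here is a minimal sketch (not part of the commit) of the difference the message describes: building the whole result tree in memory and dumping it at once, versus streaming it out piece by piece. It assumes only the jsonstreams calls that appear in the diff below (Stream, Type.object, write, subobject, iterwrite); the helper name and data are hypothetical.

# Sketch only: a hypothetical helper, not piglit code.
import json

try:
    import jsonstreams
    _STREAMS = True
except ImportError:
    _STREAMS = False


def write_results(fd, metadata, per_test_results):
    """Write a TestrunResult-shaped JSON document to the file object fd.

    per_test_results is an iterable of (test name, result dict) pairs.
    """
    if not _STREAMS:
        # Build the entire document in memory, then serialize it in one go.
        data = dict(metadata)
        data['__type__'] = 'TestrunResult'
        data['tests'] = dict(per_test_results)
        json.dump(data, fd, indent=4)
    else:
        # Stream the document: each piece is written as soon as it is
        # produced, so the whole tree never has to exist in memory at once.
        with jsonstreams.Stream(jsonstreams.Type.object, fd=fd,
                                indent=4, pretty=True) as s:
            s.write('__type__', 'TestrunResult')
            s.iterwrite(metadata.items())
            with s.subobject('tests') as t:
                t.iterwrite(per_test_results)


# Hypothetical usage:
# with open('results.json', 'w') as f:
#     write_results(f, {'name': 'quick'}, [('a@test', {'result': 'pass'})])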
@@ -44,6 +44,8 @@ Optionally, you can install the following:
- lxml. An accelerated python xml library using libxml2 (http://lxml.de/)
- simplejson. A fast C based implementation of the python json library.
(https://simplejson.readthedocs.org/en/latest/)
- jsonstreams. A JSON stream writer for python.
(https://jsonstreams.readthedocs.io/en/stable/)
For Python 2.x you can install the following to add features; these are
unnecessary for Python 3:
......
@@ -24,6 +24,7 @@ from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import collections
import functools
import os
import posixpath
import shutil
@@ -35,6 +36,11 @@ except ImportError:
import json
import six
try:
import jsonstreams
_STREAMS = True
except ImportError:
_STREAMS = False
from framework import status, results, exceptions, compat
from .abstract import FileBackend, write_compressed
@@ -130,41 +136,78 @@ class JSONBackend(FileBackend):
containers that are still open and closes the file
"""
# Create a dictionary that is full of data to be written to a single
# file
data = collections.OrderedDict()
# Load the metadata and put it into a dictionary
with open(os.path.join(self._dest, 'metadata.json'), 'r') as f:
data.update(json.load(f))
# If there is more metadata add it to the dictionary
if metadata:
data.update(metadata)
# Add the tests to the dictionary
data['tests'] = collections.OrderedDict()
test_dir = os.path.join(self._dest, 'tests')
for test in os.listdir(test_dir):
test = os.path.join(test_dir, test)
if os.path.isfile(test):
# Try to open the json snippets. If we fail to open a test then
# throw the whole thing out. This gives us atomic writes: either
# the write worked and is valid, or it didn't.
try:
with open(test, 'r') as f:
data['tests'].update(json.load(f, object_hook=piglit_decoder))
except ValueError:
pass
assert data['tests']
data = results.TestrunResult.from_dict(data)
# If jsonstreams is not present then build a complete tree of all of
# the data and write it with json.dump
if not _STREAMS:
# Create a dictionary that is full of data to be written to a
# single file
data = collections.OrderedDict()
# Load the metadata and put it into a dictionary
with open(os.path.join(self._dest, 'metadata.json'), 'r') as f:
data.update(json.load(f))
# If there is more metadata add it to the dictionary
if metadata:
data.update(metadata)
# Add the tests to the dictionary
data['tests'] = collections.OrderedDict()
test_dir = os.path.join(self._dest, 'tests')
for test in os.listdir(test_dir):
test = os.path.join(test_dir, test)
if os.path.isfile(test):
# Try to open the json snippets. If we fail to open a test
# then throw the whole thing out. This gives us atomic
# writes: either the write worked and is valid, or it
# didn't.
try:
with open(test, 'r') as f:
data['tests'].update(
json.load(f, object_hook=piglit_decoder))
except ValueError:
pass
assert data['tests']
data = results.TestrunResult.from_dict(data)
# write out the combined file. Use the compression writer from the
# FileBackend
with self._write_final(os.path.join(self._dest, 'results.json')) as f:
json.dump(data, f, default=piglit_encoder, indent=INDENT)
# Otherwise use jsonstreams to write the final dictionary. This uses an
# external library, but is slightly faster and uses considerably less
# memory than building a complete tree.
else:
encoder = functools.partial(json.JSONEncoder, default=piglit_encoder)
with self._write_final(os.path.join(self._dest, 'results.json')) as f:
with jsonstreams.Stream(jsonstreams.Type.object, fd=f, indent=4,
encoder=encoder, pretty=True) as s:
s.write('__type__', 'TestrunResult')
with open(os.path.join(self._dest, 'metadata.json'),
'r') as n:
s.iterwrite(six.iteritems(json.load(n)))
if metadata:
s.iterwrite(six.iteritems(metadata))
test_dir = os.path.join(self._dest, 'tests')
with s.subobject('tests') as t:
for test in os.listdir(test_dir):
test = os.path.join(test_dir, test)
if os.path.isfile(test):
try:
with open(test, 'r') as f:
a = json.load(
f, object_hook=piglit_decoder)
except ValueError:
continue
t.iterwrite(six.iteritems(a))
# Delete the temporary files
os.unlink(os.path.join(self._dest, 'metadata.json'))
......
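As the commit message notes, both code paths produce the same format, just by different means, so nothing special is needed to consume the output. A minimal sketch (not part of the commit), assuming an uncompressed results.json at a hypothetical path:

# Reading back the streamed output requires nothing beyond the stdlib,
# since both branches emit ordinary JSON.
import json

with open('results.json', 'r') as f:
    results = json.load(f)

assert results['__type__'] == 'TestrunResult'
for name, result in results['tests'].items():
    print(name, result)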