...
 
Commits (2058)
......@@ -5,4 +5,5 @@ repos
build_root
summary.xml
results
test
\ No newline at end of file
test
tags
From 8c68dd6e483255138cb056174f5133f1579303b3 Mon Sep 17 00:00:00 2001
From: renchenglei <chenglei.ren@intel.com>
Date: Thu, 1 Aug 2019 07:23:52 +0300
Subject: [PATCH] use the private drm lib name
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
v2: add iris (Tapani)
Change-Id: I567da1c48e588b1a65c883323e92c2a34822b5d8
Signed-off-by: Yong Yao <yong.yao@intel.com>
Signed-off-by: Ren Chenglei <chenglei.ren@intel.com>
Signed-off-by: Tapani Pälli <tapani.palli@intel.com>
---
Android.common.mk | 2 +-
src/gallium/drivers/iris/Android.mk | 2 +-
src/gallium/winsys/i915/drm/Android.mk | 2 +-
src/gallium/winsys/iris/drm/Android.mk | 2 +-
src/intel/Android.vulkan.mk | 2 +-
src/mesa/drivers/dri/i915/Android.mk | 2 +-
6 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/Android.common.mk b/Android.common.mk
index 825b60381d..2431a69cc5 100644
--- a/Android.common.mk
+++ b/Android.common.mk
@@ -114,7 +114,7 @@ LOCAL_CFLAGS_arm64 += -DUSE_AARCH64_ASM
ifneq ($(LOCAL_IS_HOST_MODULE),true)
LOCAL_CFLAGS += -DHAVE_LIBDRM
-LOCAL_SHARED_LIBRARIES += libdrm
+LOCAL_SHARED_LIBRARIES += libdrm_pri
endif
LOCAL_CFLAGS_32 += -DDEFAULT_DRIVER_DIR=\"/vendor/lib/$(MESA_DRI_MODULE_REL_PATH)\"
diff --git a/src/gallium/drivers/iris/Android.mk b/src/gallium/drivers/iris/Android.mk
index 824ae7a0b1..70b9527bbf 100644
--- a/src/gallium/drivers/iris/Android.mk
+++ b/src/gallium/drivers/iris/Android.mk
@@ -154,7 +154,7 @@ LOCAL_C_INCLUDES := \
$(MESA_TOP)/include/drm-uapi \
$(MESA_TOP)/src/gallium/include
-LOCAL_SHARED_LIBRARIES := libdrm_intel
+LOCAL_SHARED_LIBRARIES := libdrm_intel_pri
LOCAL_STATIC_LIBRARIES := \
libmesa_intel_common \
diff --git a/src/gallium/winsys/i915/drm/Android.mk b/src/gallium/winsys/i915/drm/Android.mk
index bab3e85c5d..bc8cd0ebe2 100644
--- a/src/gallium/winsys/i915/drm/Android.mk
+++ b/src/gallium/winsys/i915/drm/Android.mk
@@ -30,7 +30,7 @@ include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(C_SOURCES)
-LOCAL_SHARED_LIBRARIES := libdrm_intel
+LOCAL_SHARED_LIBRARIES := libdrm_intel_pri
LOCAL_MODULE := libmesa_winsys_i915
include $(GALLIUM_COMMON_MK)
diff --git a/src/gallium/winsys/iris/drm/Android.mk b/src/gallium/winsys/iris/drm/Android.mk
index 820f32c3ec..4d1a3f2615 100644
--- a/src/gallium/winsys/iris/drm/Android.mk
+++ b/src/gallium/winsys/iris/drm/Android.mk
@@ -33,7 +33,7 @@ LOCAL_WHOLE_STATIC_LIBRARIES := \
libmesa_isl \
libmesa_intel_dev
-LOCAL_SHARED_LIBRARIES := libdrm_intel
+LOCAL_SHARED_LIBRARIES := libdrm_intel_pri
LOCAL_MODULE := libmesa_winsys_iris
include $(GALLIUM_COMMON_MK)
diff --git a/src/intel/Android.vulkan.mk b/src/intel/Android.vulkan.mk
index f66ab5bdfa..092e73faef 100644
--- a/src/intel/Android.vulkan.mk
+++ b/src/intel/Android.vulkan.mk
@@ -57,7 +57,7 @@ ANV_STATIC_LIBRARIES := \
libmesa_genxml \
libmesa_nir
-ANV_SHARED_LIBRARIES := libdrm
+ANV_SHARED_LIBRARIES := libdrm_pri
ifeq ($(filter $(MESA_ANDROID_MAJOR_VERSION), 4 5 6 7),)
ANV_SHARED_LIBRARIES += libnativewindow
diff --git a/src/mesa/drivers/dri/i915/Android.mk b/src/mesa/drivers/dri/i915/Android.mk
index b1054aa6e2..7c9c8210df 100644
--- a/src/mesa/drivers/dri/i915/Android.mk
+++ b/src/mesa/drivers/dri/i915/Android.mk
@@ -47,7 +47,7 @@ LOCAL_WHOLE_STATIC_LIBRARIES := \
LOCAL_SHARED_LIBRARIES := \
$(MESA_DRI_SHARED_LIBRARIES) \
- libdrm_intel
+ libdrm_intel_pri
LOCAL_GENERATED_SOURCES := \
$(MESA_DRI_OPTIONS_H) \
From e6180fb469dc86bdbf40bb0ec14a21bc739ba497 Mon Sep 17 00:00:00 2001
From: renchenglei <chenglei.ren@intel.com>
Date: Thu, 27 Jun 2019 18:04:50 +0800
Subject: [PATCH] WA for random vulkan link error on Celadon
We may encounter a random vulkan link error on the build bot
with a low reproduction rate (5%). This workaround helps fix the
random link issue.
Tracked-On: OAM-83401
Signed-off-by: Chenglei Ren <chenglei.ren@intel.com>
---
src/intel/vulkan/anv_util.c | 71 ++++++++++++++++++++++++++++++++++++-
1 file changed, 70 insertions(+), 1 deletion(-)
diff --git a/src/intel/vulkan/anv_util.c b/src/intel/vulkan/anv_util.c
index 1159ccecc6..32aa1e5ee2 100644
--- a/src/intel/vulkan/anv_util.c
+++ b/src/intel/vulkan/anv_util.c
@@ -76,6 +76,75 @@ __anv_perf_warn(struct anv_instance *instance, const void *object,
intel_logw("%s:%d: PERF: %s", file, line, buffer);
}
+const char *
+vk_Result_to_str_pri(VkResult input)
+{
+ switch(input) {
+ case -1000174001:
+ return "VK_ERROR_NOT_PERMITTED_EXT";
+ case -1000161000:
+ return "VK_ERROR_FRAGMENTATION_EXT";
+ case -1000158000:
+ return "VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT";
+ case -1000072003:
+ return "VK_ERROR_INVALID_EXTERNAL_HANDLE";
+ case -1000069000:
+ return "VK_ERROR_OUT_OF_POOL_MEMORY";
+ case -1000012000:
+ return "VK_ERROR_INVALID_SHADER_NV";
+ case -1000011001:
+ return "VK_ERROR_VALIDATION_FAILED_EXT";
+ case -1000003001:
+ return "VK_ERROR_INCOMPATIBLE_DISPLAY_KHR";
+ case -1000001004:
+ return "VK_ERROR_OUT_OF_DATE_KHR";
+ case -1000000001:
+ return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR";
+ case -1000000000:
+ return "VK_ERROR_SURFACE_LOST_KHR";
+ case -12:
+ return "VK_ERROR_FRAGMENTED_POOL";
+ case -11:
+ return "VK_ERROR_FORMAT_NOT_SUPPORTED";
+ case -10:
+ return "VK_ERROR_TOO_MANY_OBJECTS";
+ case -9:
+ return "VK_ERROR_INCOMPATIBLE_DRIVER";
+ case -8:
+ return "VK_ERROR_FEATURE_NOT_PRESENT";
+ case -7:
+ return "VK_ERROR_EXTENSION_NOT_PRESENT";
+ case -6:
+ return "VK_ERROR_LAYER_NOT_PRESENT";
+ case -5:
+ return "VK_ERROR_MEMORY_MAP_FAILED";
+ case -4:
+ return "VK_ERROR_DEVICE_LOST";
+ case -3:
+ return "VK_ERROR_INITIALIZATION_FAILED";
+ case -2:
+ return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
+ case -1:
+ return "VK_ERROR_OUT_OF_HOST_MEMORY";
+ case 0:
+ return "VK_SUCCESS";
+ case 1:
+ return "VK_NOT_READY";
+ case 2:
+ return "VK_TIMEOUT";
+ case 3:
+ return "VK_EVENT_SET";
+ case 4:
+ return "VK_EVENT_RESET";
+ case 5:
+ return "VK_INCOMPLETE";
+ case 1000001003:
+ return "VK_SUBOPTIMAL_KHR";
+ default:
+ unreachable("Undefined enum value.");
+ }
+}
+
VkResult
__vk_errorv(struct anv_instance *instance, const void *object,
VkDebugReportObjectTypeEXT type, VkResult error,
@@ -84,7 +153,7 @@ __vk_errorv(struct anv_instance *instance, const void *object,
char buffer[256];
char report[512];
- const char *error_str = vk_Result_to_str(error);
+ const char *error_str = vk_Result_to_str_pri(error);
if (format) {
vsnprintf(buffer, sizeof(buffer), format, ap);
#!/usr/bin/python
import os
import sys
import os.path as path
sys.path.append(path.join(path.dirname(path.abspath(sys.argv[0])), "..",
"repos", "mesa_ci"))
import build_support as bs
def main():
    """Run the Android build test unless the Mesa checkout is too old.

    Returns 0 (skipping the test) when the Mesa tree predates the
    features the Android build requires; otherwise kicks off the
    AndroidBuilder for the configured modules.
    """
    project_map = bs.ProjectMap()
    mesa_dir = project_map.project_source_dir("mesa")

    # Disable test if using < Mesa 18.0: the osmesa meson build file
    # only exists from 18.0 onward.
    osmesa_meson = os.path.join(mesa_dir,
                                'src/mesa/drivers/osmesa/meson.build')
    if not os.path.exists(osmesa_meson):
        return 0

    # The android source tree used does not support Mesa <19.1
    relnotes_19_1 = os.path.join(mesa_dir, 'docs/relnotes/19.1.0.html')
    if not os.path.exists(relnotes_19_1):
        print("Android build test does not support this version of Mesa. No "
              "build test will be performed.")
        return 0

    bs.build(bs.AndroidBuilder(src_location="~/android-ia",
                               modules=["libGLES_mesa",
                                        "vulkan.broxton"]))


if __name__ == '__main__':
    main()
This diff is collapsed.
# Copyright (C) Intel Corp. 2014. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice (including the
# next paragraph) shall be included in all copies or substantial
# portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# **********************************************************************/
# * Authors:
# * Mark Janes <mark.a.janes@intel.com>
# **********************************************************************/
#!/usr/bin/python
import time
import os
from command import *
#from command import killMajorProcesses
from options import *
from project_map import ProjectMap
#from export import Export
#from export import ExportTest
#from export import ExportSymbols
#from jenkins import Jenkins
#from spec import BuildInvoke
#from spec import BuildSpec
#from export import ExportZip
#from gtest import GTest as GTest
#from clean_server import CleanServer
from repo_set import *
from dependency_graph import DependencyGraph
from export import Export, convert_rsync_path
from gtest import *
from jenkins import *
from bisect_test import *
from builders import *
from timer import TimeOut
from deqp_builder import *
from perf_builder import *
class DefaultTimeout:
    """Fallback time limit for a build component.

    Components are expected to finish within 15 minutes; daily and
    release builds are allowed 120 minutes.
    """

    def __init__(self, options=None):
        # Fall back to a default Options object when none is supplied.
        self._options = options if options else Options()

    def GetDuration(self):
        """Return the allowed build duration in minutes."""
        if self._options.type in ("daily", "release"):
            return 120
        return 15
def null_build():
    """No-op build action; used where a callable is required."""
    return None
class NullInvoke:
    """Stand-in for an invoke object when there is no results server.

    Accepts the same set_info/set_status calls as a real invoke and
    silently discards them, so the main routine can post results
    unconditionally.
    """

    def __init__(self):
        pass

    def set_info(self, *args):
        """Discard build metadata."""
        return None

    def set_status(self, *args):
        """Discard build status."""
        return None
def build(builder, options=None, time_limit=None):
    """Drive *builder* through the requested actions (clean/build/test).

    :param builder: object providing clean(), build() and test() methods
    :param options: an Options instance; a default one is created if None
    :param time_limit: object with GetDuration(); DefaultTimeout if None

    Runs each requested action in clean -> build -> test order under a
    watchdog timer, records start/end/status via an invoke object (a
    NullInvoke when no result server is configured), and re-raises any
    action failure after marking the build failed.
    """
    if not time_limit:
        time_limit = DefaultTimeout()
    if not options:
        options = Options()
    action_map = [
        ("clean", builder.clean),
        ("build", builder.build),
        ("test", builder.test),
    ]
    actions = options.action
    invoke = NullInvoke()

    # Scrub environment variables that would alter which libraries and
    # drivers the build picks up.
    # NOTE: dict.has_key() was removed in Python 3; the "in" operator
    # works on both Python 2 and 3.
    for var in ("PKG_CONFIG_PATH", "LD_LIBRARY_PATH", "LIBGL_DRIVERS_PATH"):
        if var in os.environ:
            del os.environ[var]

    # TODO: add this stuff
    if options.result_path:
        # A result path means we are posting to a server: use a real
        # invoke object so status gets written.
        invoke = ProjectInvoke(options)
        invoke.set_info("start_time", time.time())

    # start a thread to limit the run-time of the build
    to = TimeOut(time_limit)
    to.start()

    if options.hardware != "builder" and check_gpu_hang():
        return

    Export().import_build_root()

    if type(actions) is str:
        actions = [actions]

    # clean out the test results directory, so jenkins processes only
    # the files for the current build
    if "test" in actions:
        test_out_dir = ProjectMap().source_root() + "/test"
        if os.path.exists(test_out_dir):
            rmtree(test_out_dir)

    # Walk through the possible actions in order; if an action is not
    # requested go on.  The order does matter.
    for k, a in action_map:
        if k not in actions:
            continue
        options.action = a
        try:
            a()
        except:
            # we need to cancel the timer first, in case set_status
            # fails and the timer is left running; otherwise the
            # timeout thread would prevent the process from ending
            to.end()
            invoke.set_info("status", "failed")
            raise

    # must cancel timeout timer, which will prevent process from
    # ending.  cancel the timer first, in case set_status fails, and
    # the timer is left running
    to.end()
    invoke.set_info("end_time", time.time())
    invoke.set_info("status", "success")
This diff is collapsed.
This diff is collapsed.
# Copyright (C) Intel Corp. 2014. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice (including the
# next paragraph) shall be included in all copies or substantial
# portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# **********************************************************************/
# * Authors:
# * Mark Janes <mark.a.janes@intel.com>
# **********************************************************************/
"""handles abstraction of command execution on unix and windows"""
import os, subprocess, sys, shutil, atexit, signal, stat
import platform
def killMajorProcesses():
    """Intentionally do nothing.

    NOTE(review): appears to be a stub kept so existing callers keep
    working — confirm before removing.
    """
    pass
# keep a list of subprocess, so they can be killed if the timeout
# mechanism expires
all_processes = {}


def kill_all_subprocesses():
    """Terminate every subprocess registered in all_processes.

    Still-running processes are killed together with their whole
    process group, then the registry is reset.  Raises if a process
    that has not exited cannot be killed.
    """
    global all_processes
    sys.stdout.flush()
    for p in list(all_processes.keys()):
        if p.poll() is None:
            # in unix, any subprocess that executes another command
            # will create a process that won't be killed.  We use
            # process groups to manage this.
            # http://stackoverflow.com/questions/4789837/how-to-terminate-a-python-subprocess-launched-with-shell-true
            os.killpg(p.pid, signal.SIGTERM)
            try:
                p.terminate()
                p.kill()
            except:
                # BUG FIX: poll() returns the exit status, and a clean
                # exit gives 0, which is falsy — the original
                # "if p.poll():" re-raised for processes that had in
                # fact exited.  Test "has exited" explicitly.
                if p.poll() is not None:
                    # process ended on its own; ignore the kill error
                    return
                # otherwise raise so we know a process is stuck
                print("Couldn't kill {0}".format(p))
                raise
    all_processes = {}
# signal handlers have a different signature than atexit callbacks
def kill_all_subprocesses_signal(ignore, _):
    """Signal-handler adapter: delegates to kill_all_subprocesses."""
    kill_all_subprocesses()


# register the cleanup routine for normal interpreter exit and for the
# interrupt/fatal signals below
atexit.register(kill_all_subprocesses)
for _signum in (signal.SIGINT, signal.SIGABRT, signal.SIGTERM,
                signal.SIGFPE, signal.SIGILL, signal.SIGSEGV):
    signal.signal(_signum, kill_all_subprocesses_signal)
def system():
    """Return the OS name from platform, normalized to lower case."""
    name = platform.system()
    return name.lower()
def run_batch_command(commands, streamedOutput=True, noop=False, env=None,
                      expected_return_code=0, quiet=False, stdinput=None):
    """Run *commands* as a subprocess and return its (out, err).

    :param commands: argv list for subprocess.Popen
    :param streamedOutput: if True, child inherits stdout/stderr
        (out/err come back as None); if False, output is captured
    :param noop: print the command (unless quiet) and return 0
        without executing anything
    :param env: extra environment variables layered over os.environ
    :param expected_return_code: raise CalledProcessError when the
        child's return code differs; pass None to disable the check
    :param quiet: suppress echoing the command and its output
    :param stdinput: passed through as the child's stdin
    :raises subprocess.CalledProcessError: on unexpected return code
    """
    if not env:
        env = {}
    # first command needs to have only \ path separators
    envStrs = [a[0] + "=" + a[1] for a in env.items()]
    if not quiet:
        print(" ".join(envStrs) + " " + " ".join(commands))
        sys.stdout.flush()
    # BUG FIX: dict(os.environ.items() + env.items()) is Python 2 only;
    # items() views cannot be concatenated on Python 3.  copy()/update()
    # works on both and keeps env overriding os.environ.
    procEnv = os.environ.copy()
    procEnv.update(env)
    if noop:
        return 0
    if streamedOutput is True:
        p = subprocess.Popen(commands, env=procEnv,
                             preexec_fn=os.setsid,
                             stdin=stdinput)
    else:
        p = subprocess.Popen(commands, env=procEnv,
                             preexec_fn=os.setsid,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             stdin=stdinput)
    # register so the timeout/exit machinery can kill the child
    global all_processes
    all_processes[p] = True
    (out, err) = p.communicate()
    if expected_return_code is not None:
        if p.returncode != expected_return_code:
            print("ERROR: command failed")
            if out:
                print("STDOUT: " + out)
            if err:
                print("STDERR: " + err)
            sys.stdout.flush()
            sys.stderr.flush()
            raise subprocess.CalledProcessError(p.returncode, commands)
    if p in all_processes:
        del all_processes[p]
    if out and not quiet:
        print(out)
    if err and not quiet:
        print(err)
    return (out, err)
def on_error(func, path, _):
    """
    Error handler for ``shutil.rmtree``.

    If the error is due to an access error (read only file)
    it attempts to add write permission and then retries.

    If the retry fails for another reason, the error is printed
    and re-raised.

    Usage : ``shutil.rmtree(path, onerror=on_error)``
    """
    # Assume an access error: make the path writable, then retry once.
    os.chmod(path, stat.S_IWRITE)
    try:
        func(path)
    except:
        # NOTE: Python 2 print statement converted to the single-arg
        # function form, which is valid on Python 2 and 3.
        print("encountered deletion error for path: " + path)
        raise
def rmfile(filename):
    """Remove a file, retrying via on_error if removal fails
    (e.g. a read-only file)."""
    try:
        os.remove(filename)
    except OSError:
        # BUG FIX: "except OSError, e" is Python 2-only syntax (a
        # SyntaxError on Python 3) and the bound exception was never
        # used, so the binding is dropped entirely.
        on_error(rmfile, filename, None)
def rmtree(in_path):
    """Delete a file or directory tree, coping with read-only entries.

    DE3015 - if the tree has any unicode file/dir names in it, we
    have to start with a unicode string (Python 2).  On Python 3,
    where ``unicode`` no longer exists, every str is already unicode.
    """
    try:
        # Python 2: coerce to a unicode path.
        unicode_path = unicode(in_path)
    except NameError:
        # BUG FIX: Python 3 has no "unicode" builtin; str is unicode.
        unicode_path = str(in_path)
    if os.path.exists(unicode_path):
        print("Deleting {0}".format(unicode_path))
        if os.path.isdir(unicode_path):
            shutil.rmtree(unicode_path, onerror=on_error)
        else:
            rmfile(unicode_path)
# Copyright (C) Intel Corp. 2014. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice (including the
# next paragraph) shall be included in all copies or substantial
# portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# **********************************************************************/
# * Authors:
# * Mark Janes <mark.a.janes@intel.com>
# **********************************************************************/
import sys
from . import ProjectInvoke
from . import ProjectMap
class DependencyGraph:
"""Calculates build order for all prerequisites"""
def __init__(self, components, options):
# The key of the graph dict is ProjectInvoke hash string, and
# the value is a list of ProjectInvoke hashes that it depends on
self._dependency_graph = {}
# List of build invoke objects that are complete
self._completed_builds = []
# Key is a build invoke, value is a list of build invokes that
# require it
self._completion_graph = {}
# key is project name, value is project tag
self._project_tags = {}
build_spec = ProjectMap().build_spec()
projects = build_spec.find("projects")
for project in projects.findall("project"):
self._project_tags[project.attrib["name"]] = project
# build up the dependency_graph
if type(components) != type([]):
components = [components]
for a_component in components:
bi = ProjectInvoke(project=a_component,
options=options)
self.add_to_graph(bi)
def ready_builds(self, filter_builds=None):
"""provide a list of builds which have all prerequisites
satisfied."""
ret_list = []
if not filter_builds:
filter_builds = []
for (component, prereqs) in self._dependency_graph.iteritems():
if not prereqs:
ret_list.append(ProjectInvoke(from_string=component))
filter_builds_str = [str(b) for b in filter_builds]
ret_list = [j for j in ret_list
if str(j) not in filter_builds_str]
return ret_list
def all_builds(self):
ret_list = []
for k in self._dependency_graph.keys():
ret_list.append(ProjectInvoke(from_string=k))
return ret_list
def build_complete(self, build):
"""notifies the DependencyGraph that a build has completed.
Makes other builds available via ready_builds"""
if not self._completion_graph:
# build up the completion_graph on the first call.
for (component, prereqs) in self._dependency_graph.items():
if not self._completion_graph.has_key(component):
self._completion_graph[component] = []
for a_prereq in prereqs:
if not self._completion_graph.has_key(a_prereq):
self._completion_graph[a_prereq] = []
if component not in self._completion_graph[a_prereq]:
self._completion_graph[a_prereq].append(component)
build = str(build)
del self._dependency_graph[build]
for an_unblocked_component in self._completion_graph[build]:
if an_unblocked_component in self._dependency_graph:
self._dependency_graph[an_unblocked_component].remove(build)
del self._completion_graph[build]
@classmethod
def long_pole(cls, invoke):
"""returns a list of invokes composing the long pole of the build"""
depGraph = cls(invoke.project,
invoke.options)
blocking_builds = [invoke]
while True:
last_build = None
last_finish_time = 0
for a_dep in depGraph._dependency_graph[str(invoke)]:
a_dep = ProjectInvoke(from_string=a_dep)
end_time = a_dep.get_info("end_time")
if not end_time:
continue
if end_time > last_finish_time:
last_build = a_dep
last_finish_time = end_time
if not last_build:
return blocking_builds
blocking_builds.append(last_build)
invoke = last_build
def _prereqs(self, project_invoke):
results = []
tags = self._project_tags[project_invoke.project]
for a_prereq in tags.findall("prerequisite"):
# make a deep copy of the project_invoke, which will be
# updated by the prereq
# allow the build specification to make prerequisites
# contingent on the type. If type is specified for the
# prereq, then it is only built for matching types.
attrib = a_prereq.attrib
if attrib.has_key("only_for_type"):
types = attrib["only_for_type"].split(",")
if project_invoke.options.type not in types:
continue
arches = [project_invoke.options.arch]
if attrib.has_key("arch"):
arches = attrib["arch"].split(",")
hardwares = [project_invoke.options.hardware]
if attrib.has_key("hardware"):
hardwares = attrib["hardware"].split(",")
for arch in arches:
for hardware in hardwares:
shards = ["0"]
p_shard = "0"
if a_prereq.attrib.has_key("shard"):
p_shard = a_prereq.attrib["shard"]
if p_shard != "0" and ":" not in p_shard:
i_shards = range(0,int(p_shard))
shards = [str(s + 1) + ":" + p_shard for s in i_shards]
for shard in shards:
pistr = str(project_invoke)
prereq_invoke = ProjectInvoke(from_string=pistr)
prereq_invoke.project = attrib["name"]
prereq_invoke.options.hardware = hardware
prereq_invoke.options.arch = arch
prereq_invoke.options.shard = shard
results.append(prereq_invoke)
return results
def add_to_graph(self, project_invoke):
"""adds the build_invoke and all prerequisites to the
_dependency_graph"""
shard = project_invoke.options.shard
if shard != "0" and ":" not in shard:
for i in range(0,int(shard)):
# make a deep copy of the project invoke
p = ProjectInvoke(from_string=str(project_invoke))
p.shard = str(i + 1) + ":" + str(shard)
self.add_to_graph(p)