jolt 0.9.123__py3-none-any.whl → 0.9.435__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- jolt/__init__.py +80 -7
- jolt/__main__.py +9 -1
- jolt/bin/fstree-darwin-x86_64 +0 -0
- jolt/bin/fstree-linux-x86_64 +0 -0
- jolt/cache.py +832 -362
- jolt/chroot.py +156 -0
- jolt/cli.py +281 -162
- jolt/common_pb2.py +63 -0
- jolt/common_pb2_grpc.py +4 -0
- jolt/config.py +98 -41
- jolt/error.py +19 -4
- jolt/filesystem.py +2 -6
- jolt/graph.py +705 -117
- jolt/hooks.py +43 -0
- jolt/influence.py +122 -3
- jolt/loader.py +369 -121
- jolt/log.py +225 -63
- jolt/manifest.py +28 -38
- jolt/options.py +35 -10
- jolt/pkgs/abseil.py +42 -0
- jolt/pkgs/asio.py +25 -0
- jolt/pkgs/autoconf.py +41 -0
- jolt/pkgs/automake.py +41 -0
- jolt/pkgs/b2.py +31 -0
- jolt/pkgs/boost.py +111 -0
- jolt/pkgs/boringssl.py +32 -0
- jolt/pkgs/busybox.py +39 -0
- jolt/pkgs/bzip2.py +43 -0
- jolt/pkgs/cares.py +29 -0
- jolt/pkgs/catch2.py +36 -0
- jolt/pkgs/cbindgen.py +17 -0
- jolt/pkgs/cista.py +19 -0
- jolt/pkgs/clang.py +44 -0
- jolt/pkgs/cli11.py +24 -0
- jolt/pkgs/cmake.py +48 -0
- jolt/pkgs/cpython.py +196 -0
- jolt/pkgs/crun.py +29 -0
- jolt/pkgs/curl.py +38 -0
- jolt/pkgs/dbus.py +18 -0
- jolt/pkgs/double_conversion.py +24 -0
- jolt/pkgs/fastfloat.py +21 -0
- jolt/pkgs/ffmpeg.py +28 -0
- jolt/pkgs/flatbuffers.py +29 -0
- jolt/pkgs/fmt.py +27 -0
- jolt/pkgs/fstree.py +20 -0
- jolt/pkgs/gflags.py +18 -0
- jolt/pkgs/glib.py +18 -0
- jolt/pkgs/glog.py +25 -0
- jolt/pkgs/glslang.py +21 -0
- jolt/pkgs/golang.py +16 -11
- jolt/pkgs/googlebenchmark.py +18 -0
- jolt/pkgs/googletest.py +46 -0
- jolt/pkgs/gperf.py +15 -0
- jolt/pkgs/grpc.py +73 -0
- jolt/pkgs/hdf5.py +19 -0
- jolt/pkgs/help2man.py +14 -0
- jolt/pkgs/inja.py +28 -0
- jolt/pkgs/jsoncpp.py +31 -0
- jolt/pkgs/libarchive.py +43 -0
- jolt/pkgs/libcap.py +44 -0
- jolt/pkgs/libdrm.py +44 -0
- jolt/pkgs/libedit.py +42 -0
- jolt/pkgs/libevent.py +31 -0
- jolt/pkgs/libexpat.py +27 -0
- jolt/pkgs/libfastjson.py +21 -0
- jolt/pkgs/libffi.py +16 -0
- jolt/pkgs/libglvnd.py +30 -0
- jolt/pkgs/libogg.py +28 -0
- jolt/pkgs/libpciaccess.py +18 -0
- jolt/pkgs/libseccomp.py +21 -0
- jolt/pkgs/libtirpc.py +24 -0
- jolt/pkgs/libtool.py +42 -0
- jolt/pkgs/libunwind.py +35 -0
- jolt/pkgs/libva.py +18 -0
- jolt/pkgs/libvorbis.py +33 -0
- jolt/pkgs/libxml2.py +35 -0
- jolt/pkgs/libxslt.py +17 -0
- jolt/pkgs/libyajl.py +16 -0
- jolt/pkgs/llvm.py +81 -0
- jolt/pkgs/lua.py +54 -0
- jolt/pkgs/lz4.py +26 -0
- jolt/pkgs/m4.py +14 -0
- jolt/pkgs/make.py +17 -0
- jolt/pkgs/mesa.py +81 -0
- jolt/pkgs/meson.py +17 -0
- jolt/pkgs/mstch.py +28 -0
- jolt/pkgs/mysql.py +60 -0
- jolt/pkgs/nasm.py +49 -0
- jolt/pkgs/ncurses.py +30 -0
- jolt/pkgs/ng_log.py +25 -0
- jolt/pkgs/ninja.py +45 -0
- jolt/pkgs/nlohmann_json.py +25 -0
- jolt/pkgs/nodejs.py +19 -11
- jolt/pkgs/opencv.py +24 -0
- jolt/pkgs/openjdk.py +26 -0
- jolt/pkgs/openssl.py +103 -0
- jolt/pkgs/paho.py +76 -0
- jolt/pkgs/patchelf.py +16 -0
- jolt/pkgs/perl.py +42 -0
- jolt/pkgs/pkgconfig.py +64 -0
- jolt/pkgs/poco.py +39 -0
- jolt/pkgs/protobuf.py +77 -0
- jolt/pkgs/pugixml.py +27 -0
- jolt/pkgs/python.py +19 -0
- jolt/pkgs/qt.py +35 -0
- jolt/pkgs/rapidjson.py +26 -0
- jolt/pkgs/rapidyaml.py +28 -0
- jolt/pkgs/re2.py +30 -0
- jolt/pkgs/re2c.py +17 -0
- jolt/pkgs/readline.py +15 -0
- jolt/pkgs/rust.py +41 -0
- jolt/pkgs/sdl.py +28 -0
- jolt/pkgs/simdjson.py +27 -0
- jolt/pkgs/soci.py +46 -0
- jolt/pkgs/spdlog.py +29 -0
- jolt/pkgs/spirv_llvm.py +21 -0
- jolt/pkgs/spirv_tools.py +24 -0
- jolt/pkgs/sqlite.py +83 -0
- jolt/pkgs/ssl.py +12 -0
- jolt/pkgs/texinfo.py +15 -0
- jolt/pkgs/tomlplusplus.py +22 -0
- jolt/pkgs/wayland.py +26 -0
- jolt/pkgs/x11.py +58 -0
- jolt/pkgs/xerces_c.py +20 -0
- jolt/pkgs/xorg.py +360 -0
- jolt/pkgs/xz.py +29 -0
- jolt/pkgs/yamlcpp.py +30 -0
- jolt/pkgs/zeromq.py +47 -0
- jolt/pkgs/zlib.py +87 -0
- jolt/pkgs/zstd.py +33 -0
- jolt/plugins/alias.py +3 -0
- jolt/plugins/allure.py +5 -2
- jolt/plugins/autotools.py +66 -0
- jolt/plugins/cache.py +133 -0
- jolt/plugins/cmake.py +74 -6
- jolt/plugins/conan.py +238 -0
- jolt/plugins/cxx.py +698 -0
- jolt/plugins/cxxinfo.py +7 -0
- jolt/plugins/dashboard.py +1 -1
- jolt/plugins/docker.py +80 -23
- jolt/plugins/email.py +2 -2
- jolt/plugins/email.xslt +144 -101
- jolt/plugins/environ.py +11 -0
- jolt/plugins/fetch.py +141 -0
- jolt/plugins/gdb.py +39 -19
- jolt/plugins/gerrit.py +1 -14
- jolt/plugins/git.py +283 -85
- jolt/plugins/googletest.py +2 -1
- jolt/plugins/http.py +36 -38
- jolt/plugins/libtool.py +63 -0
- jolt/plugins/linux.py +990 -0
- jolt/plugins/logstash.py +4 -4
- jolt/plugins/meson.py +61 -0
- jolt/plugins/ninja-compdb.py +99 -30
- jolt/plugins/ninja.py +468 -166
- jolt/plugins/paths.py +11 -1
- jolt/plugins/pkgconfig.py +219 -0
- jolt/plugins/podman.py +136 -92
- jolt/plugins/python.py +137 -0
- jolt/plugins/remote_execution/__init__.py +0 -0
- jolt/plugins/remote_execution/administration_pb2.py +46 -0
- jolt/plugins/remote_execution/administration_pb2_grpc.py +170 -0
- jolt/plugins/remote_execution/log_pb2.py +32 -0
- jolt/plugins/remote_execution/log_pb2_grpc.py +68 -0
- jolt/plugins/remote_execution/scheduler_pb2.py +41 -0
- jolt/plugins/remote_execution/scheduler_pb2_grpc.py +141 -0
- jolt/plugins/remote_execution/worker_pb2.py +38 -0
- jolt/plugins/remote_execution/worker_pb2_grpc.py +112 -0
- jolt/plugins/report.py +12 -2
- jolt/plugins/rust.py +25 -0
- jolt/plugins/scheduler.py +710 -0
- jolt/plugins/selfdeploy/setup.py +8 -4
- jolt/plugins/selfdeploy.py +138 -88
- jolt/plugins/strings.py +35 -22
- jolt/plugins/symlinks.py +26 -11
- jolt/plugins/telemetry.py +5 -2
- jolt/plugins/timeline.py +13 -3
- jolt/plugins/volume.py +46 -48
- jolt/scheduler.py +589 -192
- jolt/tasks.py +625 -121
- jolt/templates/timeline.html.template +44 -47
- jolt/timer.py +22 -0
- jolt/tools.py +638 -282
- jolt/utils.py +211 -7
- jolt/version.py +1 -1
- jolt/xmldom.py +12 -2
- {jolt-0.9.123.dist-info → jolt-0.9.435.dist-info}/METADATA +97 -38
- jolt-0.9.435.dist-info/RECORD +207 -0
- {jolt-0.9.123.dist-info → jolt-0.9.435.dist-info}/WHEEL +1 -1
- jolt/plugins/amqp.py +0 -834
- jolt/plugins/debian.py +0 -338
- jolt/plugins/ftp.py +0 -181
- jolt/plugins/repo.py +0 -253
- jolt-0.9.123.dist-info/RECORD +0 -77
- {jolt-0.9.123.dist-info → jolt-0.9.435.dist-info}/entry_points.txt +0 -0
- {jolt-0.9.123.dist-info → jolt-0.9.435.dist-info}/top_level.txt +0 -0
jolt/graph.py
CHANGED
@@ -1,13 +1,16 @@
-from contextlib import contextmanager
+from contextlib import contextmanager, ExitStack, nullcontext
 import copy
 import hashlib
 from os import getenv
 from threading import RLock
 from collections import OrderedDict
 import uuid
+import socket
+import sys

-from jolt
-from jolt
+from jolt import cli
+from jolt import common_pb2 as common_pb
+from jolt import config
 from jolt import log
 from jolt import utils
 from jolt import colors
@@ -15,19 +18,34 @@ from jolt import hooks
 from jolt import filesystem as fs
 from jolt.error import raise_error_if
 from jolt.error import raise_task_error_if
+from jolt.influence import HashInfluenceRegistry, TaskRequirementInfluence
 from jolt.options import JoltOptions
+from jolt.tasks import Alias, Resource, WorkspaceResource


 class TaskProxy(object):
-    def __init__(self, task, graph, options):
+    def __init__(self, task, graph, cache, options):
         self.task = task
         self.graph = graph
+        self.cache = cache
         self.options = options

+        # Direct and transitive dependencies.
+        # The dependency chain is broken at
+        # selfsustained dependencies that don't
+        # require their own dependencies anymore
+        # after being executed.
         self.children = []
         self.ancestors = set()
+
+        # All direct and transitive dependencies.
+        # Unlike 'children', this set is not filtered
+        # from selfsustained tasks.
         self.descendants = set()
+
+        # Unfiltered direct dependencies.
         self.neighbors = []
+
         self.extensions = []
         self.duration_queued = None
         self.duration_running = None
@@ -40,11 +58,32 @@ class TaskProxy(object):
         self._download = True
         self._local = False
         self._network = False
-
+        self._artifacts = []
+        self._status = None
+        self._finalized = False
+
+        # Consumer task if this is a resource
+        self._owner = None
+
+        # List of all artifacts that are produced by this task
+        self._artifacts = []

     def __hash__(self):
         return id(self)

+    @property
+    def artifacts(self):
+        return self._artifacts
+
+    def add_artifact(self, artifact):
+        self._artifacts.append(artifact)
+
+    def get_artifact(self, name):
+        for artifact in self.artifacts:
+            if artifact.name == name:
+                return artifact
+        return None
+
     @property
     def tools(self):
         return self.task.tools
@@ -65,12 +104,33 @@ class TaskProxy(object):
     def short_qualified_name(self):
         return self.task.short_qualified_name

+    @property
+    def exported_name(self):
+        return self.task.exported_name
+
     @property
     def log_name(self):
         return "({0} {1})".format(self.short_qualified_name, self.identity[:8])

+    def log_running_time(self):
+        """ Emits an information log line every 5 mins a task has been running. """
+        if not self.is_running():
+            return
+        minutes = int(self.duration_running.seconds / 60)
+        if (minutes % 5) == 0 and minutes > 0:
+            if minutes >= 60:
+                fmt = f"{int(minutes / 60)}h {int(minutes % 60)}min"
+            else:
+                fmt = f"{int(minutes % 60)}min"
+            if self.is_remotely_executed():
+                self.info("Remote execution still in progress after {}", fmt)
+            else:
+                self.info("Execution still in progress after {}", fmt)
+
     @property
     def identity(self):
+        raise_task_error_if(not self._finalized, self, "Task identity read prematurely")
+
         if self.task.identity is not None:
             return self.task.identity

@@ -91,7 +151,15 @@ class TaskProxy(object):

     @property
     def instance(self):
-        return self.task.
+        return self.task.instance
+
+    @instance.setter
+    def instance(self, value):
+        self.task.instance = value
+
+    @property
+    def is_unstable(self):
+        return self.task.unstable

     @property
     def weight(self):
@@ -107,6 +175,9 @@ class TaskProxy(object):
     def info(self, fmt, *args, **kwargs):
         self.task.info(fmt + " " + self.log_name, *args, **kwargs)

+    def debug(self, fmt, *args, **kwargs):
+        log.debug(fmt + " " + self.log_name, *args, **kwargs)
+
     def verbose(self, fmt, *args, **kwargs):
         log.verbose(fmt + " " + self.log_name, *args, **kwargs)

@@ -123,7 +194,7 @@ class TaskProxy(object):
         return len(self.ancestors) > 0

     def has_artifact(self):
-        return self.is_cacheable() and not self.
+        return self.is_cacheable() and not self.is_alias()

     def has_extensions(self):
         return len(self.extensions) > 0
@@ -141,24 +212,34 @@ class TaskProxy(object):
             return self._extended_task.get_extended_task()
         return self

-    def deps_available_locally(self
+    def deps_available_locally(self):
         for c in self.children:
             if c.is_resource() or c.is_alias():
                 continue
-            if not c.is_available_locally(
+            if not c.is_available_locally(persistent_only=True):
                 return False
         return True

     def is_alias(self):
         return isinstance(self.task, Alias)

-    def is_available_locally(self,
-
-
-
-
-
-
+    def is_available_locally(self, extensions=True, persistent_only=True):
+        dep_artifacts = []
+        if extensions:
+            for dep in self.extensions:
+                dep_artifacts += dep.artifacts
+        artifacts = self._artifacts + dep_artifacts
+        if persistent_only:
+            artifacts = filter(lambda a: not a.is_session(), artifacts)
+        return all(map(self.cache.is_available_locally, artifacts))
+
+    def is_available_remotely(self, extensions=True, cache=True):
+        dep_artifacts = []
+        if extensions:
+            for dep in self.extensions:
+                dep_artifacts += dep.artifacts
+        artifacts = filter(lambda a: not a.is_session(), self._artifacts + dep_artifacts)
+        return all(map(lambda artifact: self.cache.is_available_remotely(artifact, cache=cache), artifacts))

     def is_cacheable(self):
         return self.task.is_cacheable()
@@ -182,6 +263,12 @@ class TaskProxy(object):
     def is_goal(self, with_extensions=True):
         return self._goal or (with_extensions and any([e.is_goal() for e in self.extensions]))

+    def is_local(self):
+        if self.is_extension():
+            return self.get_extended_task().is_local()
+        tasks = [self.task] + [e.task for e in self.extensions]
+        return any([task.local for task in tasks])
+
     def in_progress(self):
         return self._in_progress

@@ -203,27 +290,125 @@ class TaskProxy(object):
     def is_resource(self):
         return isinstance(self.task, Resource)

+    def is_workspace_resource(self):
+        return isinstance(self.task, WorkspaceResource)
+
+    def is_running(self):
+        return self.status() == common_pb.TaskStatus.TASK_RUNNING
+
     def is_unpackable(self):
-
+        tasks = [self] + self.extensions
+        artifacts = []
+        for task in tasks:
+            artifacts.extend(task._artifacts)
+        return any(map(lambda artifact: artifact.is_unpackable(), artifacts))

-    def is_unpacked(self
+    def is_unpacked(self):
         tasks = [self] + self.extensions
-
+        artifacts = []
+        for task in tasks:
+            artifacts.extend(task._artifacts)
+        return any(map(lambda artifact: artifact.is_unpacked(), artifacts))

-    def is_uploadable(self,
+    def is_uploadable(self, artifacts=None):
         tasks = [self] + self.extensions
-
+        if not artifacts:
+            artifacts = []
+            for task in tasks:
+                artifacts.extend(task._artifacts)
+        return all(map(lambda artifact: artifact.is_uploadable(), artifacts))

-
-
+    @contextmanager
+    def lock_artifacts(self, discard=False):
+        artifacts = []
+        stack = ExitStack()
+        try:
+            for artifact in self.artifacts:
+                lock = self.cache.lock_artifact(artifact, discard=discard)
+                artifacts.append(stack.enter_context(lock))
+            self._artifacts = artifacts
+            yield artifacts
+        finally:
+            stack.close()

     def disable_download(self):
         self._download = False

+    def download(self, force=False, session_only=False, persistent_only=False):
+        """
+        Downloads all artifacts of this task.
+
+        If the task is not downloadable, the method returns True. Failure to
+        download persistent artifacts is considered a failure, and the method
+        returns False. Session artifacts are not required to be downloaded.
+
+        :param force: Force download even if the artifacts are already available.
+        :param session_only: Download only session artifacts.
+        :param persistent_only: Download only persistent artifacts.
+
+        """
+        if not force and not self.is_downloadable():
+            return True
+        success = True
+        artifacts = self._artifacts
+        artifacts_session = list(filter(lambda a: a.is_session(), artifacts))
+        artifacts_persistent = list(filter(lambda a: not a.is_session(), artifacts))
+        download_all = not session_only and not persistent_only
+        if session_only or download_all:
+            for artifact in artifacts_session:
+                if not self.cache.download(artifact, force=force):
+                    self.warning("Failed to download session artifact: {}", artifact.identity)
+        if persistent_only or download_all:
+            success = all([self.cache.download(artifact, force=force) for artifact in artifacts_persistent])
+        return success
+
+    def upload(self, force=False, locked=False, session_only=False, persistent_only=False, artifacts=None):
+        artifacts = artifacts or self._artifacts
+        if session_only:
+            artifacts = list(filter(lambda a: a.is_session(), artifacts))
+        if persistent_only:
+            artifacts = list(filter(lambda a: not a.is_session(), artifacts))
+        if not artifacts:
+            return True
+        if not self.is_uploadable(artifacts):
+            return False
+        return all([self.cache.upload(artifact, force=force, locked=locked) for artifact in artifacts])
+
     def resolve_requirement_alias(self, name):
         return self.requirement_aliases.get(name)

+    def set_cancelled(self):
+        self.set_status(common_pb.TaskStatus.TASK_CANCELLED)
+
+    def set_passed(self):
+        self.set_status(common_pb.TaskStatus.TASK_PASSED)
+
+    def set_failed(self):
+        self.set_status(common_pb.TaskStatus.TASK_FAILED)
+
+    def set_skipped(self):
+        self.set_status(common_pb.TaskStatus.TASK_SKIPPED)
+
+    def set_downloaded(self):
+        self.set_status(common_pb.TaskStatus.TASK_DOWNLOADED)
+
+    def set_uploaded(self):
+        self.set_status(common_pb.TaskStatus.TASK_UPLOADED)
+
+    def set_running(self):
+        self.set_status(common_pb.TaskStatus.TASK_RUNNING)
+
+    def set_queued(self):
+        self.set_status(common_pb.TaskStatus.TASK_QUEUED)
+
+    def status(self):
+        return self._status
+
+    def set_status(self, status):
+        self._status = status
+
     def set_in_progress(self):
+        self.set_queued()
         self._in_progress = True

     def set_locally_executed(self):
@@ -235,9 +420,12 @@ class TaskProxy(object):
     def set_goal(self):
         self._goal = True

-    def
+    def set_owner(self, owner):
+        self._owner = owner
+        self.task.exported_name = f"{self.short_qualified_name}@@{owner.short_qualified_name}"
+
+    def finalize(self, dag):
         log.debug("Finalizing: " + self.short_qualified_name)
-        self.manifest = manifest

         # Find all direct and transitive dependencies
         self.ancestors = set()
@@ -253,44 +441,127 @@ class TaskProxy(object):
             self.children.extend(n.children)
             n.ancestors.add(self)

-        # Exclude transitive alias and resources dependencies
+        # Exclude transitive alias and resources dependencies.
+        # Workspace resources are included as they may be required by its dependencies.
         self.children = list(
-            filter(lambda n: not n.is_alias() and
+            filter(lambda n: dag.are_neighbors(self, n) or (not n.is_alias() and not n.is_resource()),
                   utils.unique_list(self.children)))

+        # Prepare workspace resources for this task so that influence can be calculated
+        for child in self.children:
+            if not child.is_workspace_resource():
+                continue
+            child.task.prepare_ws_for(self.task)
+
         self.descendants = list(self.descendants)

         self.task.influence += [TaskRequirementInfluence(n) for n in self.neighbors]
+        self._finalized = True
+        self.identity
+
+        hooks.task_created(self)

         return self.identity

+    def finalize_artifacts(self):
+        self._artifacts.extend(self.task._artifacts(self.cache, self))
+
     def taint(self, salt=None):
         self.task.taint = salt or uuid.uuid4()
         if salt is None:
             # Only recalculate identity when build is forced, not when salted
             self.identity = None
             self.identity
+            # Recreate artifacts
+            self._artifacts = []
+            self.finalize_artifacts()

-
-        self.
-
-
-
+        # If this is an alias, taint all children
+        if self.is_alias():
+            for child in self.children:
+                child.taint()
+
+        # Taint all extensions
+        for extension in self.extensions:
+            extension.taint()

-    def
+    def queued(self, remote=True):
+        self.task.verbose("Task queued " + self.log_name)
+        self.duration_queued = utils.duration()
+        self.set_queued()
+        hooks.task_queued(self)
+
+    def running(self, when=None, what="Execution"):
+        if what:
+            self.task.info(colors.blue(what + " started " + self.log_name))
+        self.set_running()
+        hooks.task_started(self)
         self.duration_running = utils.duration() if not when else when

-    def
-        self.
-
-
-
-
+    def running_execution(self, remote=False):
+        self.worker = socket.gethostname()
+        hooks.task_started_execution(self)
+        self.running(what="Remote execution" if remote else "Execution")
+
+    def restarted_execution(self, remote=False):
+        hooks.task_finished_execution(self)
+        self.task.warning("Remote execution interrupted {}" if remote else "Execution interrupted {}", self.log_name)
+        self.queued()
+
+    def started_execution(self, remote=False):
+        self.queued()
+        self.running_execution(remote=remote)
+
+    def started_download(self):
+        self.queued()
+        self.running(what="Download")
+        hooks.task_started_download(self)
+
+    def started_upload(self):
+        self.queued()
+        self.running(what="Upload")
+        hooks.task_started_upload(self)
+
+    def _failed(self, what="Execution", interrupt=False):
+        if interrupt:
+            how = "interrupted"
+            logfn = self.warning
+            self.set_cancelled()
+        else:
+            how = "failed"
+            logfn = self.error
+            self.set_failed()
+
+        if self.duration_queued and self.duration_running:
+            logfn("{0} {1} after {2} {3}", what, how,
+                  self.duration_running,
+                  self.duration_queued.diff(self.duration_running))
+        elif self.duration_queued:
+            logfn("{0} {1} after {2}", what, how, self.duration_queued or utils.duration())
+        else:
+            logfn("{0} {1} immediately", what, how)
+
+        if self.is_unstable:
+            try:
+                self.graph.remove_node(self)
+            except KeyError:
+                self.warning("Pruned task was executed")
+            self.graph.add_unstable(self)
+            hooks.task_unstable(self)
+        else:
+            self.graph.add_failed(self)
+            hooks.task_failed(self)
+
+    def failed_download(self):
+        self._failed("Download")
+
+    def failed_upload(self):
+        self._failed("Upload")

-    def
-
+    def failed_execution(self, remote=False, interrupt=False):
+        self._failed(what="Remote execution" if remote else "Execution", interrupt=interrupt)

-    def
+    def _finished(self, what="Execution"):
         raise_task_error_if(
             self.is_completed() and not self.is_extension(),
             self, "task has already been completed")
@@ -300,118 +571,387 @@ class TaskProxy(object):
         except KeyError:
             self.warning("Pruned task was executed")
         self.task.info(colors.green(what + " finished after {0} {1}" + self.log_name),
-                       self.duration_running,
+                       self.duration_running or "00s",
                        self.duration_queued.diff(self.duration_running))
         hooks.task_finished(self)

+    def finished_download(self):
+        self.set_downloaded()
+        hooks.task_finished_download(self)
+        self._finished("Download")
+
+    def finished_upload(self):
+        self.set_uploaded()
+        hooks.task_finished_upload(self)
+        self._finished("Upload")
+
+    def finished_execution(self, remote=False):
+        self.set_passed()
+        hooks.task_finished_execution(self)
+        self._finished(what="Remote execution" if remote else "Execution")
+
     def skipped(self):
         self._completed = True
         try:
             self.graph.remove_node(self)
         except KeyError:
             pass
+        self.set_skipped()
         hooks.task_skipped(self)

     def pruned(self):
         self._completed = True
         try:
             self.graph.remove_node(self)
+            self.graph.add_pruned(self)
+            hooks.task_pruned(self)
         except KeyError:
             self.warning("Pruned task was already pruned")
-        hooks.task_pruned(self)

     def clean(self, cache, if_expired, onerror=None):
         with self.tools:
             self.task.clean(self.tools)
-
-
-
-
-
+        for artifact in self.artifacts:
+            discarded = cache.discard(artifact, if_expired, onerror=fs.onerror_warning)
+            if discarded:
+                log.debug("Discarded: {} ({})", self.short_qualified_name, artifact.identity)
+            else:
+                log.debug(" Retained: {} ({})", self.short_qualified_name, artifact.identity)

-    def
-
-
-
+    def _run_download_dependencies(self, resource_only=False):
+        for child in self.children:
+            if not child.has_artifact():
+                continue

-
-            if
-
-            if not cache.is_available_locally(child):
+            if child.is_resource() and child.is_local():
+                if child.options.worker:
+                    # Resource already acquired by the client when running as worker
                     raise_task_error_if(
-                not
-                child, "
+                        not child.download(force=True),
+                        child, "Failed to download task artifact")
+                else:
+                    # Resource about to be acquired by the client
+                    child._run_download_dependencies()
+                continue
+
+            if resource_only and not child.is_resource():
+                continue

+            raise_task_error_if(
+                not child.is_completed() and child.is_unstable,
+                self, "Task depends on failed task '{}'", child.short_qualified_name)
+            if not child.is_available_locally(extensions=False):
+                raise_task_error_if(
+                    not child.download(force=True),
+                    child, "Failed to download task artifact")
+
+    def partially_available_locally(self):
+        availability = map(lambda a: self.cache.is_available_locally(a), self.artifacts)
+        return any(availability) and not all(availability)
+
+    def _validate_platform(self):
+        """ Validates that the task is runnable on the current platform. """
+        platform_os, platform_arch = utils.platform_os_arch()
+
+        os = self.task.platform.get("node.os")
+        if os:
+            os = self.tools.expand(os)
+            raise_task_error_if(
+                os != platform_os,
+                self, f"Task is not runnable on current platform (wants node.os={os})")
+
+        arch = self.task.platform.get("node.arch")
+        if arch:
+            arch = self.tools.expand(arch)
+            raise_task_error_if(
+                arch != platform_arch,
+                self, f"Task is not runnable on current platform (wants node.arch={arch})")
+
+    def run_acquire(self, artifact, owner, log_prefix=False):
+        """
+        Acquires a resource and publishes its artifact.
+
+        The artifact is published to the cache even if the acquisition fails.
+        """
+
+        try:
+            if not self.is_workspace_resource():
+                ts = utils.duration()
+                log.info(colors.blue("Resource acquisition started ({} for {})"), self.short_qualified_name, owner.short_qualified_name)
+
+            try:
+                with log.thread_prefix(owner.identity[:8]) if log_prefix else nullcontext():
+                    acquire = getattr(self.task, "acquire_" + artifact.name) if artifact.name != "main" else self.task.acquire
+                    acquire(artifact, self.deps, self.tools, owner.task)
+            finally:
+                # Always commit the resource session artifact to the cache, even if the acquisition failed.
+                if not self.is_workspace_resource():
+                    self.cache.commit(artifact)
+
+            if not self.is_workspace_resource():
+                log.info(colors.green("Resource acquisition finished after {} ({} for {})"), ts, self.short_qualified_name, owner.short_qualified_name)
+
+        except (KeyboardInterrupt, Exception) as e:
+            if not self.is_workspace_resource():
+                log.error(colors.red("Resource acquisition failed after {} ({} for {})"), ts, self.short_qualified_name, owner.short_qualified_name)
+            if self.task.release_on_error:
+                with utils.ignore_exception():
+                    self.run_release(artifact, owner)
+            raise e
+
+    def run_release(self, artifact, owner, log_prefix=False):
+        """
+        Releases a resource.
+        """
+        try:
+            if not self.is_workspace_resource():
+                ts = utils.duration()
+                log.info(colors.blue("Resource release started ({} for {})"), self.short_qualified_name, owner.short_qualified_name)
+
+            with log.thread_prefix(owner.identity[:8]) if log_prefix else nullcontext():
+                release = getattr(self.task, "release_" + artifact.name) if artifact.name != "main" else self.task.release
+                release(artifact, self.deps, self.tools, owner.task)
+
+            if not self.is_workspace_resource():
+                log.info(colors.green("Resource release finished after {} ({} for {})"), ts, self.short_qualified_name, owner.short_qualified_name)
+
+        except (KeyboardInterrupt, Exception) as e:
+            if not self.is_workspace_resource():
+                log.error(colors.red("Resource release failed after {} ({} for {})"), ts, self.short_qualified_name, owner.short_qualified_name)
+            raise e
+
+    @contextmanager
+    def run_resources(self):
+        """
+        Acquires and releases resources for the task.
+
+        The method is called by executors before invoking the task proxy's run() method.
+        Resource dependencies are acquired and released in reverse order. If an acquisition fails,
+        already acquired resources are released in reverse order and the exception is propagated
+        to the caller.
+
+        Resource artifacts are always published and uploaded if the acquisition has been started,
+        even if the acquisition fails. That way, a failed acquisition can be debugged.
+        """
+        self._run_download_dependencies(resource_only=True)
+
+        with self._run_resources_no_dep_download():
+            yield
+
+    @contextmanager
+    def _run_resources_no_dep_download(self):
+        # Log messages are prefixed with task identity if resources are acquired in parallel
+        log_prefix = False
+
+        # Collect list of resource dependencies
+        resource_deps = [child for child in self.children if child.is_resource()]
+
+        if self.options.worker:
+            # Exclude local resources when running as worker. They are already acquired by the client.
+            resource_deps = [child for child in resource_deps if not child.is_local()]
+        elif self.options.network and not self.is_local():
+            # Exclude non-local resources in the client when running a network build.
+            # They are acquired by the remote worker.
+            resource_deps = [child for child in resource_deps if child.is_local()]
+            log_prefix = True
+
+        exitstack = ExitStack()
+        acquired = []
+        try:
+            # Acquire resource dependencies in reverse order.
+            for resource in reversed(resource_deps):
+                # Always discard resource artifacts before acquiring the resource.
+                # They should not exist in the cache when the resource is acquired,
+                # but may exist if the resource was previously acquired by an interrupted build.
+                with resource.lock_artifacts(discard=True) if not resource.is_workspace_resource() else nullcontext():
+                    resource.deps = self.cache.get_context(resource)
+                    exitstack.enter_context(resource.deps)
+
+                    # Just like tasks, a resource may have multiple artifacts. Run acquire for each artifact.
+                    for artifact in resource.artifacts:
+                        try:
+                            resource.run_acquire(artifact, self, log_prefix=log_prefix)
+                            acquired.append(resource)
+                        finally:
+                            # Always upload the artifact session artifact to the cache, even if the acquisition failed.
+                            if not resource.is_workspace_resource():
+                                resource.upload(locked=False, session_only=True, artifacts=[artifact])
+
+            yield
+
+        finally:
+            for resource in reversed(acquired):
+                for artifact in resource.artifacts:
+                    resource.run_release(artifact, self, log_prefix=log_prefix)
+            exitstack.close()
+
+    def run(self, env, force_upload=False, force_build=False):
+        # Download dependency artifacts if not already done
+        self._run_download_dependencies()
+
+        with self._run_resources_no_dep_download():
+            self._run_task(env, force_upload, force_build)
+
+    def _run_task(self, env, force_upload=False, force_build=False):
+        queue = env.queue
+
+        with self.tools:
+            available_locally = available_remotely = False
+
+            # Check if task artifact is available locally or remotely,
+            # either skip execution or download it if necessary.
             if not force_build:
-                available_locally =
+                available_locally = self.is_available_locally()
                 if available_locally and not force_upload:
+                    self.skipped()
                     return
-
-
+
+                available_remotely = self.cache.download_enabled() and self.is_available_remotely()
             if not available_locally and available_remotely:
-                available_locally =
+                available_locally = self.download()
+
+            if not available_locally and self.partially_available_locally():
+                force_build = True

             if force_build or not available_locally:
                 with log.threadsink() as buildlog:
                     if self.task.is_runnable():
                         log.verbose("Host: {0}", getenv("HOSTNAME", "localhost"))

-                    with
-
-
-
-
-
-
-
-
-
-
-
-
-                            with self.tools.cwd(self.task.joltdir):
-                                hooks.task_prepublish(self, artifact, self.tools)
-                                self.task.publish(artifact, self.tools)
-                                self.task._verify_influence(context, artifact, self.tools)
-                                hooks.task_postpublish(self, artifact, self.tools)
-                                with open(fs.path.join(artifact.path, ".build.log"), "w") as f:
-                                    f.write(buildlog.getvalue())
-                                cache.commit(artifact)
-                        else:
-                            self.info("Publication skipped, already in local cache")
-                    else:
-                        self.info("Execution skipped, already in local cache")
+                    with self.lock_artifacts(discard=not self.is_resource()) as artifacts:
+                        exitstack = ExitStack()
+
+                        # Indicates whether session artifacts have been published
+                        upload_session_artifacts = False
+
+                        try:
+                            context = self.cache.get_context(self)
+                            exitstack.enter_context(context)
+
+                            self.running_execution()
+
+                            self._validate_platform()

-
-
-
+                            with self.tools.cwd(self.task.joltdir):
+                                if self.is_goal() and self.options.debug:
+                                    log.info("Entering debug shell")
+                                    self.task.debugshell(context, self.tools)
+
+                                try:
+                                    # Run task
+                                    try:
+                                        hooks.task_prerun(self, context, self.tools)
+                                        with self.tools.timeout(seconds=config.getint("jolt", "task_timeout")):
+                                            self.task.run(context, self.tools)
+                                    finally:
+                                        hooks.task_postrun(self, context, self.tools)
+
+                                    # Publish persistent artifacts
+                                    if not self.is_available_locally(extensions=False):
+                                        for artifact in filter(lambda a: not a.is_session(), artifacts):
+                                            self.publish(context, artifact, buildlog)
+                                    else:
+                                        self.info("Publication skipped, already in local cache")
+
+                                finally:
+                                    # Session artifacts should be uploaded
+                                    upload_session_artifacts = []
+
+                                    # Publish session artifacts to local cache
+                                    for artifact in filter(lambda a: a.is_session(), artifacts):
+                                        self.publish(context, artifact)
+                                        upload_session_artifacts.append(artifact)
+
+                        except KeyboardInterrupt as e:
+                            self.failed_execution(interrupt=True)
+                            with utils.ignore_exception():
+                                exitstack.close()
+
+                            raise e
+
+                        except Exception as e:
+                            self.failed_execution(interrupt=queue.is_aborted() if queue else False)
+
+                            with utils.ignore_exception():
+                                exitstack.close()
+
+                            if queue is not None and queue.is_aborted():
+                                raise KeyboardInterrupt()
+
+                            if cli.debug_enabled:
+                                import pdb
+                                extype, value, tb = sys.exc_info()
+                                pdb.post_mortem(tb)
+
+                            raise e
+
+                        else:
+                            self.finished_execution()
+                            exitstack.close()
+
+                            # Must upload the artifact while still holding its lock, otherwise the
+                            # artifact may become unpack():ed before we have a chance to.
+                            if force_upload or force_build or not available_remotely:
+                                raise_task_error_if(
+                                    not self.upload(force=force_upload, locked=False, persistent_only=True) \
+                                    and self.cache.upload_enabled(),
+                                    self, "Failed to upload task artifact")
+
+                        finally:
+                            # Upload published session artifacts to remote cache
                             raise_task_error_if(
-
-                self,
+                                upload_session_artifacts \
+                                and not self.upload(force=force_upload, locked=False, session_only=True, artifacts=upload_session_artifacts) \
+                                and self.cache.upload_enabled(),
+                                self, "Failed to upload session artifact")

             elif force_upload or not available_remotely:
+                self.started_upload()
                 raise_task_error_if(
-                not
-                self,
+                    not self.upload(force=force_upload, persistent_only=True) \
+                    and self.cache.upload_enabled(),
+                    self, "Failed to upload task artifact")
+                self.finished_upload()
+
+            else:
+                self.skipped()
+                self.info("Execution skipped, already in local cache")

             for extension in self.extensions:
-
-            extension.
-
-
-
-
-
-
-
+                with hooks.task_run(extension):
+                    extension.run(env, force_upload, force_build)
+
+    def publish(self, context, artifact, buildlog=None):
+        hooks.task_prepublish(self, artifact, self.tools)
+        publish = self.task.publish if artifact.is_main() else \
+            getattr(self.task, "publish_" + artifact.name)
+        publish(artifact, self.tools)
+        self.task._verify_influence(context, artifact, self.tools)
+        if artifact.is_main() and buildlog:
+            with open(fs.path.join(artifact.path, ".build.log"), "w") as f:
+                f.write(buildlog.getvalue())
+        hooks.task_postpublish(self, artifact, self.tools)
+        artifact.get_cache().commit(artifact)
+
+    def raise_for_status(self, log_details=False, log_error=False):
+        with self.report() as report:
+            report.raise_for_status(log_details, log_error)

     def report(self):
         return self.task.report()

+    def unpack(self):
+        """ Unpacks all artifacts produced by this task. """
+        for artifact in self.artifacts:
+            self.cache.unpack(artifact)
+

 class Graph(object):
     def __init__(self):
         self._mutex = RLock()
         self._failed = []
+        self._unstable = []
+        self._pruned = []
         self._children = OrderedDict()
         self._parents = OrderedDict()

@@ -430,6 +970,9 @@ class Graph(object):
         del self._children[node]
         del self._parents[node]

+    def add_pruned(self, node):
+        self._pruned.append(node)
+
     def add_edges_from(self, edges):
         with self._mutex:
             for src, dst in edges:
@@ -470,6 +1013,18 @@ class Graph(object):
             g._parents[k] = copy.copy(v)
         return g

+    @property
+    def artifacts(self):
+        artifacts = []
+        for node in self.nodes:
+            if node.is_cacheable():
+                artifacts.extend(node.artifacts)
+        return artifacts
+
+    @property
+    def persistent_artifacts(self):
+        return list(filter(lambda a: not a.is_session(), self.artifacts))
+
     @property
     def nodes(self):
         with self._mutex:
@@ -485,11 +1040,22 @@ class Graph(object):
     def add_failed(self, task):
         self._failed.append(task)

+    @property
+    def unstable(self):
+        return self._unstable
+
+    def add_unstable(self, task):
+        self._unstable.append(task)
+
     @property
     def tasks(self):
         with self._mutex:
             return [n for n in self.nodes]

+    @property
+    def pruned(self):
+        return [p for p in self._pruned]
+
     @property
     def roots(self):
         with self._mutex:
@@ -503,6 +1069,13 @@ class Graph(object):
         with self._mutex:
             return self._nodes_by_name.get(qualified_name)

+    def get_task_by_identity(self, identity):
+        with self._mutex:
+            for task in self.nodes:
+                if task.identity == identity:
+                    return task
+        return None
+
     def select(self, func):
         with self._mutex:
             return [n for n in self.nodes if func(self, n)]
@@ -511,7 +1084,7 @@ class Graph(object):
         with self._mutex:
             log.debug("[GRAPH] Listing all nodes")
             for node in self.topological_nodes:
-                log.debug("[GRAPH] " + node.
+                log.debug("[GRAPH] " + node.short_qualified_name + " ({})", len(self._children[node].keys()))

     def is_leaf(self, node):
         with self._mutex:
@@ -531,25 +1104,31 @@


 class GraphBuilder(object):
-    def __init__(self, registry,
+    def __init__(self, registry, cache, options=None, progress=False, buildenv=None):
+        self.cache = cache
         self.graph = Graph()
         self.nodes = {}
         self.registry = registry
-        self.
+        self.buildenv = buildenv
         self.progress = progress
         self.options = options or JoltOptions()

-    def _get_node(self, progress, name):
+    def _get_node(self, progress, name, parent=None):
         name = utils.stable_task_name(name)
         node = self.nodes.get(name)
         if not node:
-            task = self.registry.get_task(name,
+            task = self.registry.get_task(name, buildenv=self.buildenv)
             node = self.nodes.get(task.qualified_name, None)
             if node is not None:
                 return node
-            node = TaskProxy(task, self.graph, self.options)
-
-
+            node = TaskProxy(task, self.graph, self.cache, self.options)
+            if not node.is_resource() or node.is_workspace_resource():
+                self.nodes[node.short_qualified_name] = node
+                self.nodes[node.qualified_name] = node
+            elif parent:
+                node.set_owner(parent)
+            if self.buildenv:
+                task._apply_protobuf(self.buildenv)
             if self.options.salt:
                 node.taint(self.options.salt)
             self._build_node(progress, node)
@@ -560,7 +1139,7 @@ class GraphBuilder(object):
         self.graph.add_node(node)

         if node.task.extends:
-            extended_node = self._get_node(progress, node.task.extends)
+            extended_node = self._get_node(progress, node.task.extends, parent=node)
             self.graph.add_edges_from([(node, extended_node)])
             node.set_extended_task(extended_node)
             extended_node.add_extension(node)
@@ -570,8 +1149,9 @@ class GraphBuilder(object):
         parent = node

         for requirement in node.task.requires:
-            alias, task, name = utils.parse_aliased_task_name(requirement)
-            child = self._get_node(progress, utils.format_task_name(task, name))
+            alias, _, task, name = utils.parse_aliased_task_name(requirement)
+            child = self._get_node(progress, utils.format_task_name(task, name), parent=node)
+
             # Create direct edges from alias parents to alias children
             if child.is_alias():
                 for child_child in child.children:
@@ -604,9 +1184,14 @@ class GraphBuilder(object):
         topological_nodes = self.graph.topological_nodes
         with self._progress("Collecting task influence", len(self.graph.tasks), "tasks") as p:
             for node in reversed(topological_nodes):
-                node.finalize(self.graph
+                node.finalize(self.graph)
                 p.update(1)

+        # Create artifacts in forward order so that parent identities are available
+        # when creating resource artifacts that depend on them.
+        for node in topological_nodes:
+            node.finalize_artifacts()
+
         max_time = 0
         min_time = 0
         for node in topological_nodes:
@@ -628,6 +1213,8 @@ class GraphBuilder(object):
             goal_alias.set_goal()
             self.graph.goals.append(goal_alias)

+        self.graph.all_nodes = [n for n in self.graph.nodes]
+
         return self.graph


@@ -637,7 +1224,8 @@ class PruneStrategy(object):


 class GraphPruner(object):
-    def __init__(self, strategy):
+    def __init__(self, cache, strategy):
+        self.cache = cache
         self.strategy = strategy
         self.retained = set()
         self.visited = set()