jolt 0.9.172__py3-none-any.whl → 0.9.435__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- jolt/__init__.py +80 -7
- jolt/__main__.py +9 -1
- jolt/bin/fstree-darwin-x86_64 +0 -0
- jolt/bin/fstree-linux-x86_64 +0 -0
- jolt/cache.py +596 -252
- jolt/chroot.py +36 -11
- jolt/cli.py +143 -130
- jolt/common_pb2.py +45 -45
- jolt/config.py +76 -40
- jolt/error.py +19 -4
- jolt/filesystem.py +2 -6
- jolt/graph.py +400 -82
- jolt/influence.py +110 -3
- jolt/loader.py +338 -174
- jolt/log.py +127 -31
- jolt/manifest.py +13 -46
- jolt/options.py +35 -11
- jolt/pkgs/abseil.py +42 -0
- jolt/pkgs/asio.py +25 -0
- jolt/pkgs/autoconf.py +41 -0
- jolt/pkgs/automake.py +41 -0
- jolt/pkgs/b2.py +31 -0
- jolt/pkgs/boost.py +111 -0
- jolt/pkgs/boringssl.py +32 -0
- jolt/pkgs/busybox.py +39 -0
- jolt/pkgs/bzip2.py +43 -0
- jolt/pkgs/cares.py +29 -0
- jolt/pkgs/catch2.py +36 -0
- jolt/pkgs/cbindgen.py +17 -0
- jolt/pkgs/cista.py +19 -0
- jolt/pkgs/clang.py +44 -0
- jolt/pkgs/cli11.py +24 -0
- jolt/pkgs/cmake.py +48 -0
- jolt/pkgs/cpython.py +196 -0
- jolt/pkgs/crun.py +29 -0
- jolt/pkgs/curl.py +38 -0
- jolt/pkgs/dbus.py +18 -0
- jolt/pkgs/double_conversion.py +24 -0
- jolt/pkgs/fastfloat.py +21 -0
- jolt/pkgs/ffmpeg.py +28 -0
- jolt/pkgs/flatbuffers.py +29 -0
- jolt/pkgs/fmt.py +27 -0
- jolt/pkgs/fstree.py +20 -0
- jolt/pkgs/gflags.py +18 -0
- jolt/pkgs/glib.py +18 -0
- jolt/pkgs/glog.py +25 -0
- jolt/pkgs/glslang.py +21 -0
- jolt/pkgs/golang.py +16 -11
- jolt/pkgs/googlebenchmark.py +18 -0
- jolt/pkgs/googletest.py +46 -0
- jolt/pkgs/gperf.py +15 -0
- jolt/pkgs/grpc.py +73 -0
- jolt/pkgs/hdf5.py +19 -0
- jolt/pkgs/help2man.py +14 -0
- jolt/pkgs/inja.py +28 -0
- jolt/pkgs/jsoncpp.py +31 -0
- jolt/pkgs/libarchive.py +43 -0
- jolt/pkgs/libcap.py +44 -0
- jolt/pkgs/libdrm.py +44 -0
- jolt/pkgs/libedit.py +42 -0
- jolt/pkgs/libevent.py +31 -0
- jolt/pkgs/libexpat.py +27 -0
- jolt/pkgs/libfastjson.py +21 -0
- jolt/pkgs/libffi.py +16 -0
- jolt/pkgs/libglvnd.py +30 -0
- jolt/pkgs/libogg.py +28 -0
- jolt/pkgs/libpciaccess.py +18 -0
- jolt/pkgs/libseccomp.py +21 -0
- jolt/pkgs/libtirpc.py +24 -0
- jolt/pkgs/libtool.py +42 -0
- jolt/pkgs/libunwind.py +35 -0
- jolt/pkgs/libva.py +18 -0
- jolt/pkgs/libvorbis.py +33 -0
- jolt/pkgs/libxml2.py +35 -0
- jolt/pkgs/libxslt.py +17 -0
- jolt/pkgs/libyajl.py +16 -0
- jolt/pkgs/llvm.py +81 -0
- jolt/pkgs/lua.py +54 -0
- jolt/pkgs/lz4.py +26 -0
- jolt/pkgs/m4.py +14 -0
- jolt/pkgs/make.py +17 -0
- jolt/pkgs/mesa.py +81 -0
- jolt/pkgs/meson.py +17 -0
- jolt/pkgs/mstch.py +28 -0
- jolt/pkgs/mysql.py +60 -0
- jolt/pkgs/nasm.py +49 -0
- jolt/pkgs/ncurses.py +30 -0
- jolt/pkgs/ng_log.py +25 -0
- jolt/pkgs/ninja.py +45 -0
- jolt/pkgs/nlohmann_json.py +25 -0
- jolt/pkgs/nodejs.py +19 -11
- jolt/pkgs/opencv.py +24 -0
- jolt/pkgs/openjdk.py +26 -0
- jolt/pkgs/openssl.py +103 -0
- jolt/pkgs/paho.py +76 -0
- jolt/pkgs/patchelf.py +16 -0
- jolt/pkgs/perl.py +42 -0
- jolt/pkgs/pkgconfig.py +64 -0
- jolt/pkgs/poco.py +39 -0
- jolt/pkgs/protobuf.py +77 -0
- jolt/pkgs/pugixml.py +27 -0
- jolt/pkgs/python.py +19 -0
- jolt/pkgs/qt.py +35 -0
- jolt/pkgs/rapidjson.py +26 -0
- jolt/pkgs/rapidyaml.py +28 -0
- jolt/pkgs/re2.py +30 -0
- jolt/pkgs/re2c.py +17 -0
- jolt/pkgs/readline.py +15 -0
- jolt/pkgs/rust.py +41 -0
- jolt/pkgs/sdl.py +28 -0
- jolt/pkgs/simdjson.py +27 -0
- jolt/pkgs/soci.py +46 -0
- jolt/pkgs/spdlog.py +29 -0
- jolt/pkgs/spirv_llvm.py +21 -0
- jolt/pkgs/spirv_tools.py +24 -0
- jolt/pkgs/sqlite.py +83 -0
- jolt/pkgs/ssl.py +12 -0
- jolt/pkgs/texinfo.py +15 -0
- jolt/pkgs/tomlplusplus.py +22 -0
- jolt/pkgs/wayland.py +26 -0
- jolt/pkgs/x11.py +58 -0
- jolt/pkgs/xerces_c.py +20 -0
- jolt/pkgs/xorg.py +360 -0
- jolt/pkgs/xz.py +29 -0
- jolt/pkgs/yamlcpp.py +30 -0
- jolt/pkgs/zeromq.py +47 -0
- jolt/pkgs/zlib.py +87 -0
- jolt/pkgs/zstd.py +33 -0
- jolt/plugins/alias.py +3 -0
- jolt/plugins/allure.py +2 -2
- jolt/plugins/autotools.py +66 -0
- jolt/plugins/cache.py +1 -1
- jolt/plugins/cmake.py +74 -6
- jolt/plugins/conan.py +238 -0
- jolt/plugins/cxxinfo.py +7 -0
- jolt/plugins/docker.py +76 -19
- jolt/plugins/email.xslt +141 -118
- jolt/plugins/environ.py +11 -0
- jolt/plugins/fetch.py +141 -0
- jolt/plugins/gdb.py +33 -14
- jolt/plugins/gerrit.py +0 -13
- jolt/plugins/git.py +248 -66
- jolt/plugins/googletest.py +1 -1
- jolt/plugins/http.py +1 -1
- jolt/plugins/libtool.py +63 -0
- jolt/plugins/linux.py +990 -0
- jolt/plugins/logstash.py +4 -4
- jolt/plugins/meson.py +61 -0
- jolt/plugins/ninja-compdb.py +96 -28
- jolt/plugins/ninja.py +424 -150
- jolt/plugins/paths.py +11 -1
- jolt/plugins/pkgconfig.py +219 -0
- jolt/plugins/podman.py +131 -87
- jolt/plugins/python.py +137 -0
- jolt/plugins/remote_execution/administration_pb2.py +27 -19
- jolt/plugins/remote_execution/log_pb2.py +12 -12
- jolt/plugins/remote_execution/scheduler_pb2.py +23 -23
- jolt/plugins/remote_execution/worker_pb2.py +19 -19
- jolt/plugins/report.py +7 -2
- jolt/plugins/rust.py +25 -0
- jolt/plugins/scheduler.py +135 -86
- jolt/plugins/selfdeploy/setup.py +6 -6
- jolt/plugins/selfdeploy.py +49 -31
- jolt/plugins/strings.py +35 -22
- jolt/plugins/symlinks.py +11 -4
- jolt/plugins/telemetry.py +1 -2
- jolt/plugins/timeline.py +13 -3
- jolt/scheduler.py +467 -165
- jolt/tasks.py +427 -111
- jolt/templates/timeline.html.template +44 -47
- jolt/timer.py +22 -0
- jolt/tools.py +527 -188
- jolt/utils.py +183 -3
- jolt/version.py +1 -1
- jolt/xmldom.py +12 -2
- {jolt-0.9.172.dist-info → jolt-0.9.435.dist-info}/METADATA +97 -41
- jolt-0.9.435.dist-info/RECORD +207 -0
- {jolt-0.9.172.dist-info → jolt-0.9.435.dist-info}/WHEEL +1 -1
- jolt/plugins/amqp.py +0 -855
- jolt/plugins/debian.py +0 -338
- jolt/plugins/repo.py +0 -253
- jolt/plugins/snap.py +0 -122
- jolt-0.9.172.dist-info/RECORD +0 -92
- {jolt-0.9.172.dist-info → jolt-0.9.435.dist-info}/entry_points.txt +0 -0
- {jolt-0.9.172.dist-info → jolt-0.9.435.dist-info}/top_level.txt +0 -0
jolt/scheduler.py
CHANGED
@@ -1,7 +1,9 @@
 from concurrent.futures import ThreadPoolExecutor, as_completed, Future
 import copy
+from functools import wraps
 import os
 import queue
+from threading import Lock

 from jolt import common_pb2 as common_pb
 from jolt import config
@@ -9,13 +11,11 @@ from jolt import hooks
 from jolt import log
 from jolt import utils
 from jolt import tools
-from jolt.error import raise_error
 from jolt.error import raise_task_error
 from jolt.error import raise_task_error_if
 from jolt.graph import PruneStrategy
-from jolt.manifest import ManifestExtension
-from jolt.manifest import ManifestExtensionRegistry
 from jolt.options import JoltOptions
+from jolt.timer import Timer


 class JoltEnvironment(object):
@@ -24,105 +24,218 @@ class JoltEnvironment(object):


 class TaskQueue(object):
-
+    """
+    A helper class for tracking tasks in progress and their completion.
+    """
+
+    def __init__(self):
         self.futures = {}
-        self.
-        self.cache = cache
-        self.session = session
+        self.futures_lock = Lock()
         self.duration_acc = utils.duration_diff(0)
         self._aborted = False
+        self._timer = Timer(60, self._log_task_running_time)
+        self._timer.start()
+
+    def _log_task_running_time(self):
+        with self.futures_lock:
+            for future in self.futures:
+                self.futures[future].task.log_running_time()
+
+    def submit(self, executor):
+        """
+        Submit an exeuctor to the task queue for execution.
+
+        The method schedules the executor for execution and returns a Future object
+        that may be used to track completion of the task.
+        """

-    def submit(self, task):
         if self._aborted:
             return None

-        env = JoltEnvironment(
-
-
-
-            "no executor can execute the task; "
-            "requesting a distributed network build without proper configuration?")
-
-        task.set_in_progress()
-        future = executor.submit(env)
-        self.futures[future] = executor
+        env = JoltEnvironment(queue=self)
+        future = executor.schedule(env)
+        with self.futures_lock:
+            self.futures[future] = executor
         return future

     def wait(self):
+        """
+        Wait for any task to complete.
+
+        The method waits for the next task to complete and returns the task and any
+        exception that may have occurred during execution. If no task is in progress,
+        the method returns None, None.
+        """
+
         for future in as_completed(self.futures):
             task = self.futures[future].task
             try:
                 future.result()
             except Exception as error:
-                log.exception()
                 return task, error
             finally:
                 self.duration_acc += task.duration_running or 0
-
+                with self.futures_lock:
+                    del self.futures[future]
             return task, None
         return None, None

     def abort(self):
+        """
+        Abort all tasks in progress.
+
+        The method cancels all tasks in progress and prevents any new tasks from being
+        submitted to the task queue. The method doesn't wait for all tasks to complete
+        before returning.
+        """
         self._aborted = True
-
-        executor.
-
-
-
-
+        with self.futures_lock:
+            for future, executor in self.futures.items():
+                executor.cancel()
+                future.cancel()
+        if len(self.futures):
+            log.info("Waiting for tasks to finish, please be patient")
+        self._timer.cancel()
+
+    def shutdown(self):
+        """
+        Shutdown the task queue.
+        """
+        self._timer.cancel()

     def is_aborted(self):
+        """ Returns true if the task queue has been aborted. """
         return self._aborted

     def in_progress(self, task):
-
+        """ Returns true if the task is in progress. """
+        with self.futures_lock:
+            return task in self.futures.values()

     def empty(self):
-
+        """ Returns true if the task queue is empty. """
+        with self.futures_lock:
+            return len(self.futures) == 0


 class Executor(object):
+    """
+    Base class for all executors.
+
+    An executor is responsible for running a task. It is created by an executor
+    factory and is submitted to a task queue. The factory is also
+    responsible for hosting a thread pool that will run the executors it creates.
+
+    The type of executor created by the factory depends on the execution strategy
+    selected by the user through command line options. The strategy is responsible
+    for deciding which executor to create for each task.
+
+    An implementation of an executor must implement the run method, which is called
+    from the thread pool. The run method is responsible for running the task and
+    handling any exceptions that may occur during execution.
+    """
+
     def __init__(self, factory):
         self.factory = factory
-        self._status = None

-    def
+    def schedule(self, env):
+        """ Schedule the task for execution.
+
+        This method is called by the task queue to schedule the task for execution
+        in the factory thread pool. The method must return a Future object that
+        represents the task execution. The Future object is used to track the
+        execution of the task and to retrieve the result of the execution
+        once it is completed.
+
+        The method must be implemented by all executors. They must call the
+        factory submit method to schedule the task for execution and also
+        mark the task as in progress with set_in_progress().
+
+        Args:
+            env: The JoltEnvironment object that contains the queue and cache objects.
+
+        """
         return self.factory.submit(self, env)

     def cancel(self):
+        """
+        Cancel the task.
+
+        This method is optional and may be implemented by executors that support
+        cancellation of tasks, such as network executors where a remote scheduler
+        may be able to cancel a task that is already running.
+
+        By default, the method does nothing.
+        """
         pass

     def is_aborted(self):
+        """ Check if executor has been aborted. """
         return self.factory.is_aborted()

     def run(self, env):
-
+        """
+        Run the task.
+
+        This method must be implemented by all executors. It is called from the
+        factory thread pool and is responsible for running the task
+        and handling any exceptions that may occur during execution.
+        Any exceptions raised by the task must, if caught, be re-raised to
+        the caller unless the task is marked as unstable, in which case the
+        exception should be logged and ignored.
+
+        The task run() method shall be run within a hooks.task_run()
+        context manager to ensure that the task status is recognized by
+        the report hooks and other plugins.
+
+        Network executors have additional requirements. See the
+        NetworkExecutor class for more information.
+        """
+        raise NotImplementedError


 class LocalExecutor(Executor):
+    """
+    An Executor that runs a task locally.
+
+    The executor runs the task on the local machine. The task is run
+    by calling the task.run() method.
+
+    The executor is created by the local executor factory and is
+    typically run sequentially with other executors.
+    """
+
     def __init__(self, factory, task, force_upload=False, force_build=False):
         super().__init__(factory)
         self.task = task
         self.force_build = force_build
         self.force_upload = force_upload

+    def schedule(self, env):
+        """
+        Schedule the task for execution.
+
+        The task is marked as in progress before scheduling.
+        """
+        self.task.set_in_progress()
+        return super().schedule(env)
+
     def _run(self, env, task):
         if self.is_aborted():
             return
         try:
             with hooks.task_run(task):
                 self.task.run(
-                    env
+                    env,
                     force_build=self.force_build,
                     force_upload=self.force_upload)

         except Exception as e:
-            log.exception()
+            log.exception(e, error=False)
             if not task.is_unstable:
+                self.task.raise_for_status(log_error=getattr(env, "worker", False))
                 raise e

-        return task
-
     def get_all_extensions(self, task):
         extensions = copy.copy(task.extensions)
         for ext in extensions:
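The reworked TaskQueue pairs each submitted Executor with a Future and periodically logs the running time of tasks in flight. A minimal sketch of how a build loop might drive this API, going by the docstrings above (the graph, strategy and session objects are assumptions standing in for jolt's surrounding build machinery, not part of this diff):

    queue = TaskQueue()

    # Schedule one executor per task; the strategy decides what kind of executor.
    for task in graph:                               # assumed iterable of tasks
        executor = strategy.create_executor(session, task)
        queue.submit(executor)                       # returns a Future, or None if aborted

    # Drain the queue; wait() returns (task, error) for the next completed task,
    # or (None, None) when nothing is in progress.
    while not queue.empty():
        task, error = queue.wait()
        if error is not None:
            queue.abort()                            # cancel remaining work, block new submissions
            break

    queue.shutdown()                                 # stops the periodic running-time timer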
@@ -138,15 +251,61 @@ class LocalExecutor(Executor):


 class NetworkExecutor(Executor):
-
+    def run(self, env):
+        """
+        Run the task.
+
+        See the Executor class for basic information.
+
+        Network executors have additional requirements. Before scheduling
+        the task to a remote scheduler, the executor must call
+        run_resources() on the task. This acquires any Resources marked
+        local=True and uploads the resulting session artifacts
+        to the remote cache.
+
+        Once the task has been submitted to the remote scheduler, the executor
+        must run task.queued() on the task and its extensions. This is done
+        to ensure that the task status is correctly reported to the
+        user.
+
+        For any change in state of task, the executor must run one of:
+
+        - task.running_execution(remote=True) - when the task has started
+        - task.failed_execution(remote=True) - when the task has failed
+        - task.failed_execution(remote=True, interrupt=True) - when the
+          task has been interrupted, e.g. by a user request or rescheduling
+        - task.finished_execution(remote=True) - when the task has passed
+
+        Upon completion of the task, whether successful or not, task
+        session artifacts must be downloaded to the local cache, if
+        the task is marked as downloadable. This is done by calling
+        task.download() with the session_only flag set to True.
+
+        Persistent artifacts are downloaded only if the task is successful
+        and the task is marked as downloadable.
+        """
+        raise NotImplementedError


 class SkipTask(Executor):
+    """
+    An Executor that skips a task.
+
+    This executor is created by the concurrent executor factory when a task
+    is skipped, i.e. when the task artifacts are already available locally or
+    remotely and the task does not need to be run.
+    """
+
     def __init__(self, factory, task, *args, **kwargs):
         super().__init__(factory, *args, **kwargs)
         self.task = task

     def run(self, env):
+        """
+        Skip the task.
+
+        The task and its extensions are marked as skipped.
+        """
         self.task.skipped()
         for ext in self.task.extensions:
             ext.skipped()
@@ -154,11 +313,30 @@ class SkipTask(Executor):


 class Downloader(Executor):
+    """
+    An Executor that downloads task artifacts.
+
+    The executor downloads the task artifacts and its extensions from the
+    remote cache to the local cache. Failure to download the artifacts
+    is reported by raising an exception.
+
+    Downloader executors are typically run in parallel with other executors.
+
+    """
     def __init__(self, factory, task, *args, **kwargs):
         super().__init__(factory, *args, **kwargs)
         self.task = task

-    def
+    def schedule(self, env):
+        """
+        Schedule the task for execution.
+
+        The task is marked as in progress before scheduling.
+        """
+        self.task.set_in_progress()
+        return super().schedule(env)
+
+    def _download(self, task):
         if self.is_aborted():
             return
         if not task.is_downloadable():
@@ -177,18 +355,39 @@ class Downloader(Executor):
         task.finished_download()

     def run(self, env):
-
+        """ Downloads artifacts. """
+
+        self._download(self.task)
         for ext in self.task.extensions:
-            self._download(
+            self._download(ext)
         return self.task


 class Uploader(Executor):
+    """
+    An Executor that uploads task artifacts.
+
+    The executor uploads the task artifacts and its extensions from the
+    local cache to the remote cache. Failure to upload the artifacts
+    is reported by raising an exception.
+
+    Uploader executors are typically run in parallel with other executors.
+    """
+
     def __init__(self, factory, task, *args, **kwargs):
         super().__init__(factory, *args, **kwargs)
         self.task = task

-    def
+    def schedule(self, env):
+        """
+        Schedule the task for execution.
+
+        The task is marked as in progress before scheduling.
+        """
+        self.task.set_in_progress()
+        return super().schedule(env)
+
+    def _upload(self, task):
         if self.is_aborted():
             return
         try:
@@ -205,50 +404,79 @@ class Uploader(Executor):
         task.finished_upload()

     def run(self, env):
-
+        """ Uploads artifacts. """
+
+        self._upload(self.task)
         for ext in self.task.extensions:
-            self._upload(
+            self._upload(ext)

         return self.task


 @utils.Singleton
 class ExecutorRegistry(object):
+    """
+    The ExecutorRegistry is responsible for creating executors.
+
+    The types of executors that are possible to create are:
+
+    - create_local: Runs tasks locally.
+    - create_network: Schedules tasks for remote execution.
+    - create_downloader: Downloads task artifacts.
+    - create_uploader: Uploads task artifacts.
+    - create_skipper: Skips tasks.
+
+    The registry utilizes different ExecutorFactory objects to create executors. Plugins
+    can register their own NetworkExecutorFactory objects with the help of the
+    ExecutorFactory.Register decorator.
+    """
+
     executor_factories = []
-    extension_factories = []

     def __init__(self, options=None):
         self._options = options or JoltOptions()
         self._factories = [factory(self._options) for factory in self.__class__.executor_factories]
         self._local_factory = LocalExecutorFactory(self._options)
         self._concurrent_factory = ConcurrentLocalExecutorFactory(self._options)
-        self._extensions = [factory().create() for factory in self.__class__.extension_factories]

     def shutdown(self):
+        """ Shuts all executor factories and thread-pools down """
+
         for factory in self._factories:
             factory.shutdown()
         self._local_factory.shutdown()
         self._concurrent_factory.shutdown()

     def create_session(self, graph):
+        """ Creates a session for all factories. """
         return {factory: factory.create_session(graph) for factory in self._factories}

     def create_skipper(self, task):
+        """ Creates an executor that skips a task. """
         return SkipTask(self._concurrent_factory, task)

     def create_downloader(self, task):
-
+        """ Creates an executor that downloads task artifacts. """
         return Downloader(self._concurrent_factory, task)

     def create_uploader(self, task):
-
+        """ Creates an executor that uploads task artifacts. """
         return Uploader(self._concurrent_factory, task)

     def create_local(self, task, force=False):
+        """ Creates an executor that runs a task locally. """
         task.set_locally_executed()
         return self._local_factory.create(task, force=force)

     def create_network(self, session, task):
+        """
+        Creates an executor that schedules a task for remote execution.
+
+        All registred network executor factories are queried to create an executor.
+        The first factory that can create an executor is used. If no factory is able
+        to create an executor, a local executor is created as fallback.
+        """
+
         for factory in self._factories:
             executor = factory.create(session[factory], task)
             if executor is not None:
@@ -256,54 +484,53 @@ class ExecutorRegistry(object):
                 return executor
         return self.create_local(task)

-    def get_network_parameters(self, task):
-        parameters = {}
-        for extension in self._extensions:
-            parameters.update(extension.get_parameters(task))
-        return parameters

+class ExecutorFactory(object):
+    """
+    The ExecutorFactory class is responsible for creating executors.

-
-
-    def Register(cls):
-        ExecutorRegistry.extension_factories.insert(0, cls)
-        return cls
-
-    def create(self):
-        raise NotImplementedError()
-
-
-class NetworkExecutorExtension(object):
-    def get_parameters(self, task):
-        return {}
-
+    The factory is responsible for creating executors that run tasks. The factory
+    is also responsible for hosting a thread pool that will run the executors it creates.

-
-
-
-
-        self.executor = executor
-        self.env = env
+    """
+    class QueueItem(object):
+        """
+        The type of item that is put into the queue thread-pool queue.

-
-
+        It wraps the executor and its priority.
+        """
+        def __init__(self, priority: int, future: Future, executor: Executor, env: JoltEnvironment):
+            self.priority = priority
+            self.future = future
+            self.executor = executor
+            self.env = env

-
-
+        def __le__(self, o):
+            return self.priority <= o.priority

-
-
+        def __ge__(self, o):
+            return self.priority >= o.priority

-
-
+        def __lt__(self, o):
+            return self.priority < o.priority

-
-
+        def __gt__(self, o):
+            return self.priority > o.priority

+        def __eq__(self, o):
+            return self.priority == o.priority

-class ExecutorFactory(object):
     @staticmethod
     def Register(cls):
+        """
+        Decorator to register an executor factory.
+
+        The decorator is used by plugins that whish to register their own
+        executor factories. Such factories are used by the ExecutorRegistry
+        to create executors for tasks, as determined by the execution strategy
+        selected by the user.
+        """
+
         ExecutorRegistry.executor_factories.insert(0, cls)
         return cls

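The registry now builds executors exclusively from registered ExecutorFactory classes; the old NetworkExecutorExtension mechanism is removed. A hedged sketch of how a scheduler plugin could hook in through the Register decorator (class names other than the jolt ones are invented for illustration):

    from jolt.scheduler import ExecutorFactory, NetworkExecutorFactory

    @ExecutorFactory.Register
    class MySchedulerFactory(NetworkExecutorFactory):
        def create_session(self, graph):
            # Per-build state handed back to create() via ExecutorRegistry.create_session().
            return {"graph": graph}

        def create(self, session, task):
            # A real plugin would return a NetworkExecutor subclass bound to this
            # factory; returning None lets the registry fall back to other
            # factories or, ultimately, to a local executor.
            return None

Registered factories are instantiated with the current JoltOptions and queried in order by ExecutorRegistry.create_network().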
@@ -314,43 +541,76 @@ class ExecutorFactory(object):
         self._options = options or JoltOptions()

     def is_aborted(self):
+        """ Returns true if the build and thus the factory has been aborted. """
         return self._aborted

     def is_keep_going(self):
+        """ Returns true if the build should continue even if a task fails. """
         return self._options.keep_going

     def shutdown(self):
+        """
+        Called to shutdown the factory and its thread-pool.
+
+        The method is called when the build is complete or when the build is aborted.
+        After the method is called, no more tasks can be submitted to the factory and
+        the is_aborted() method will return True.
+        """
         self._aborted = True
         self.pool.shutdown()

     def create(self, task):
+        """
+        Create an executor for the provided task.
+
+        Must be implemented by all executor factories. The method must return
+        an executor that is capable of running the task. The executor must be
+        created with the factory as its parent so that it can be submitted to
+        the correct thread-pool for execution.
+        """
         raise NotImplementedError()

     def _run(self):
-
+        item = self._queue.get(False)
         self._queue.task_done()
         try:
             if not self.is_aborted():
-
+                item.executor.run(item.env)
         except KeyboardInterrupt as e:
-            raise_error("Interrupted by user")
             self._aborted = True
-
+            item.future.set_exception(e)
         except Exception as e:
             if not self.is_keep_going():
                 self._aborted = True
-
+            item.future.set_exception(e)
         else:
-
+            item.future.set_result(item.executor)

     def submit(self, executor, env):
+        """
+        Submit an executor to the thread-pool for execution.
+
+        The method submits the executor to the thread-pool for execution. The executor
+        is wrapped in a Future object that is returned to the caller. The Future object
+        is used to track the execution of the task and to retrieve the result of the
+        execution once it is completed.
+        """
         future = Future()
-        self._queue.put(
+        self._queue.put(ExecutorFactory.QueueItem(-executor.task.weight, future, executor, env))
         self.pool.submit(self._run)
         return future


 class LocalExecutorFactory(ExecutorFactory):
+    """
+    Factory for creating local executors.
+
+    The factory creates executors that run tasks locally. Typically,
+    only one LocalExecutor is allowed to run at a time, unless the
+    user has specified a higher number of parallel tasks in the
+    configuration file or through command line options (-j).
+    """
+
     def __init__(self, options=None):
         max_workers = config.getint(
             "jolt", "parallel_tasks",
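ExecutorFactory.submit() now enqueues a QueueItem whose priority is the negated task weight, and the comparison operators defined on QueueItem make heavier tasks come out of the work queue first. A standalone illustration of that ordering (the PriorityQueue type is an assumption; the factory's actual queue construction is not visible in this hunk):

    import queue

    class Item:
        # Same idea as ExecutorFactory.QueueItem: ordered by priority only.
        def __init__(self, priority, name):
            self.priority = priority
            self.name = name

        def __lt__(self, other):
            return self.priority < other.priority

    q = queue.PriorityQueue()
    for name, weight in [("libc", 10), ("app", 1), ("toolchain", 50)]:
        q.put(Item(-weight, name))           # negate the weight, as submit() does

    print([q.get().name for _ in range(3)])  # ['toolchain', 'libc', 'app']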
@@ -360,10 +620,19 @@ class LocalExecutorFactory(ExecutorFactory):
             max_workers=max_workers)

     def create(self, task, force=False):
+        """ Create a LocalExecutor for the task. """
         return LocalExecutor(self, task, force_build=force)


 class ConcurrentLocalExecutorFactory(ExecutorFactory):
+    """
+    A shared factory for local executors that are allowed to run concurrently.
+
+    The factory cannot create any executors on its own. Instead, its executors
+    are created by the ExecutorRegistry. The factory thread-pool is then used to
+    run executors concurrently.
+    """
+
     def __init__(self, options=None):
         max_workers = tools.Tools().thread_count()
         super().__init__(
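LocalExecutorFactory sizes its pool from the parallel_tasks setting read above, while ConcurrentLocalExecutorFactory sizes its pool from the machine's thread count. Assuming jolt's INI-style configuration file, raising the local parallelism would look something like:

    [jolt]
    parallel_tasks = 4

The -j command line option mentioned in the docstring is the per-invocation equivalent.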
@@ -375,6 +644,10 @@ class ConcurrentLocalExecutorFactory(ExecutorFactory):


 class NetworkExecutorFactory(ExecutorFactory):
+    """
+    Base class for executors that schedule task executions remotely in a build cluster.
+    """
+
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)

@@ -382,18 +655,73 @@ class NetworkExecutorFactory(ExecutorFactory):
         raise NotImplementedError()


+def ensure_executor_return(func):
+    """ Decorator to ensure that an executor is returned by factories. """
+
+    @wraps(func)
+    def wrapper(self, session, task):
+        executor = func(self, session, task)
+        raise_task_error_if(
+            not executor, task,
+            "no executor can execute the task; "
+            "requesting a distributed network build without proper configuration?")
+        return executor
+
+    return wrapper
+
+
 class ExecutionStrategy(object):
+    """
+    Base class for all execution strategies.
+
+    An execution strategy is responsible for deciding which executor to create for each task.
+    The decision is based on the type of task and the availability of the task's artifacts in
+    local and remote caches.
+
+    The strategy is also responsible for deciding if task requirements should be pruned
+    from the build graph. This is done to avoid processing tasks that are not needed for the build.
+
+    Strategies are selected by the user through command line options.
+
+    """
     def create_executor(self, session, task):
+        """
+        Create an executor for the task.
+
+        The method must be implemented by all execution strategies. It is responsible for
+        creating an executor that is capable of running or processing the task. Creation
+        of an executor should be delegated to the ExecutorRegistry which has the knowledge
+        of all available executor factories.
+        """
+        raise NotImplementedError()
+
+    def should_prune_requirements(self, task):
+        """
+        Return True if the task requirements should be pruned from the build graph.
+
+        The method must be implemented by all execution strategies.
+        """
         raise NotImplementedError()


 class LocalStrategy(ExecutionStrategy, PruneStrategy):
+    """
+    Strategy for local builds.
+
+    By default, the strategy schedules tasks for local execution, unless the task
+    artifacts are available in the local cache. If available remotely, the strategy
+    will create a downloader executor to download the artifacts.
+    """
+
     def __init__(self, executors, cache):
         self.executors = executors
         self.cache = cache

+    @ensure_executor_return
     def create_executor(self, session, task):
-
+        """ Create an executor for the task. """
+
+        if task.is_alias() or task.is_resource():
             return self.executors.create_skipper(task)
         if not task.is_cacheable():
             return self.executors.create_local(task)
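Strategies now return executors through the ensure_executor_return guard, which turns a missing executor into a task error instead of a silent None. A minimal custom strategy following the same contract might look like this (illustrative only; AlwaysLocalStrategy is not part of jolt):

    from jolt.graph import PruneStrategy
    from jolt.scheduler import ExecutionStrategy, ensure_executor_return

    class AlwaysLocalStrategy(ExecutionStrategy, PruneStrategy):
        def __init__(self, executors, cache):
            self.executors = executors       # an ExecutorRegistry
            self.cache = cache

        @ensure_executor_return
        def create_executor(self, session, task):
            if task.is_alias() or task.is_resource():
                return self.executors.create_skipper(task)
            return self.executors.create_local(task)

        def should_prune_requirements(self, task):
            return False                     # always keep the full dependency graph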
@@ -404,6 +732,8 @@ class LocalStrategy(ExecutionStrategy, PruneStrategy):
             return self.executors.create_local(task)

     def should_prune_requirements(self, task):
+        """ Prune task requirements if possible """
+
         if task.is_alias() or not task.is_cacheable():
             return False
         if task.is_available_locally():
@@ -414,10 +744,21 @@ class LocalStrategy(ExecutionStrategy, PruneStrategy):


 class DownloadStrategy(ExecutionStrategy, PruneStrategy):
+    """
+    Strategy for downloading task artifacts.
+
+    The strategy is used when the user has requested that task artifacts be downloaded.
+    If the task artifacts are available in the local cache, the strategy will skip the
+    task. If the task artifacts are available in the remote cache, the strategy will
+    create a downloader executor to download the artifacts. If the task artifacts are
+    not available in either cache, the strategy reports an error.
+    """
+
     def __init__(self, executors, cache):
         self.executors = executors
         self.cache = cache

+    @ensure_executor_return
     def create_executor(self, session, task):
         if task.is_alias():
             return self.executors.create_skipper(task)
@@ -436,19 +777,27 @@ class DownloadStrategy(ExecutionStrategy, PruneStrategy):


 class DistributedStrategy(ExecutionStrategy, PruneStrategy):
+    """
+    Strategy for distributed network builds.
+
+    By default, the strategy schedules tasks for remote execution, if there is no
+    artifact available. Otherwise, artifacts are either uploaded or downloaded as
+    needed.
+    """
+
     def __init__(self, executors, cache):
         self.executors = executors
         self.cache = cache

+    @ensure_executor_return
     def create_executor(self, session, task):
-
+        """ Create an executor for the task. """
+
+        if task.is_alias() or task.is_resource():
             return self.executors.create_skipper(task)

-        if task.
-
-            return self.executors.create_local(task)
-        else:
-            return self.executors.create_skipper(task)
+        if task.is_local():
+            return self.executors.create_local(task)

         if not task.is_cacheable():
             return self.executors.create_network(session, task)
@@ -472,11 +821,13 @@ class DistributedStrategy(ExecutionStrategy, PruneStrategy):
         if task.is_available_locally() and task.is_uploadable():
             return self.executors.create_uploader(task)
         if task.is_fast() and task.deps_available_locally():
-            return self.executors.create_local(task)
+            return self.executors.create_local(task, force=True)

         return self.executors.create_network(session, task)

     def should_prune_requirements(self, task):
+        """ Prune task requirements if possible """
+
         if task.is_alias() or not task.is_cacheable():
             return False
         if task.is_available_remotely():
@@ -485,15 +836,24 @@ class DistributedStrategy(ExecutionStrategy, PruneStrategy):


 class WorkerStrategy(ExecutionStrategy, PruneStrategy):
+    """
+    Strategy for worker builds.
+
+    This strategy is used on workers when the user has requested a network build.
+    It is similar to the LocalStrategy in that it will run tasks locally if no
+    artifacts are available. However, if artifacts are available locally, the
+    strategy will upload them to the remote cache.
+    """
+
     def __init__(self, executors, cache):
         self.executors = executors
         self.cache = cache

+    @ensure_executor_return
     def create_executor(self, session, task):
-
-        return self.executors.create_local(task)
+        """ Create an executor for the task. """

-        if task.is_alias():
+        if task.is_alias() or task.is_resource():
             return self.executors.create_skipper(task)

         raise_task_error_if(
@@ -525,6 +885,8 @@ class WorkerStrategy(ExecutionStrategy, PruneStrategy):
             return self.executors.create_local(task)

     def should_prune_requirements(self, task):
+        """ Prune task requirements if possible """
+
         if task.is_alias() or not task.is_cacheable():
             return False
         if task.is_available_locally():
@@ -545,76 +907,16 @@ def get_exported_task_set(task):
     return list(set(children))


-class TaskIdentityExtension(ManifestExtension):
-    def export_manifest(self, manifest, tasks):
-        # Generate a list of all tasks that must be evaluated
-        # for inclusion in the manifest
-        all_tasks = []
-        for task in tasks:
-            all_tasks += get_exported_task_set(task)
-        all_tasks = list(set(all_tasks))
-
-        for child in all_tasks:
-            manifest_task = manifest.find_task(child.qualified_name)
-            if manifest_task is None:
-                manifest_task = manifest.create_task()
-                manifest_task.name = child.qualified_name
-            manifest_task.identity = child.identity
-            manifest_task.instance = child.instance
-
-
-ManifestExtensionRegistry.add(TaskIdentityExtension())
-
-
-class TaskExportExtension(ManifestExtension):
-    def export_manifest(self, manifest, tasks):
-        short_task_names = set()
-
-        # Generate a list of all tasks that must be evaluated
-        # for inclusion in the manifest
-        all_tasks = []
-        for task in tasks:
-            all_tasks += get_exported_task_set(task)
-        all_tasks = list(set(all_tasks))
-
-        # Add all tasks with export attributes to the manifest
-        for child in all_tasks:
-            manifest_task = manifest.find_task(child.qualified_name)
-            if manifest_task is None:
-                manifest_task = manifest.create_task()
-                manifest_task.name = child.qualified_name
-            for key, export in child.task._get_export_objects().items():
-                attrib = manifest_task.create_attribute()
-                attrib.name = key
-                attrib.value = export.export(child.task)
-            short_task_names.add(child.name)
-
-        # Figure out if any task with an overridden default parameter
-        # value was included in the manifest. If so, add info about it.
-        default_task_names = set()
-        for task in all_tasks:
-            for task in task.options.default:
-                short_name, _ = utils.parse_task_name(task)
-                if short_name in short_task_names:
-                    default_task_names.add(task)
-        if default_task_names:
-            build = manifest.create_build()
-            for task in default_task_names:
-                default = build.create_default()
-                default.name = task
-
-
-ManifestExtensionRegistry.add(TaskExportExtension())
-
-
 def export_tasks(tasks):
     pb_tasks = {}

     for task in tasks:
         properties = []
         for key, export in task.task._get_export_objects().items():
-
-
+            value = export.export(task.task)
+            if value is not None:
+                pb_attrib = common_pb.Property(key=key, value=str(value))
+                properties.append(pb_attrib)

         platform = common_pb.Platform(
             properties=[
@@ -632,7 +934,7 @@ def export_tasks(tasks):
             properties=properties,
         )

-        pb_tasks[task.
+        pb_tasks[task.exported_name] = common_pb.Task(**args)

     return pb_tasks
