jolt 0.9.76__py3-none-any.whl → 0.9.429__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (201) hide show
  1. jolt/__init__.py +88 -7
  2. jolt/__main__.py +9 -1
  3. jolt/bin/fstree-darwin-x86_64 +0 -0
  4. jolt/bin/fstree-linux-x86_64 +0 -0
  5. jolt/cache.py +839 -367
  6. jolt/chroot.py +156 -0
  7. jolt/cli.py +362 -143
  8. jolt/common_pb2.py +63 -0
  9. jolt/common_pb2_grpc.py +4 -0
  10. jolt/config.py +99 -42
  11. jolt/error.py +19 -4
  12. jolt/expires.py +2 -2
  13. jolt/filesystem.py +8 -6
  14. jolt/graph.py +705 -117
  15. jolt/hooks.py +63 -1
  16. jolt/influence.py +129 -6
  17. jolt/loader.py +369 -121
  18. jolt/log.py +225 -63
  19. jolt/manifest.py +28 -38
  20. jolt/options.py +35 -10
  21. jolt/pkgs/abseil.py +42 -0
  22. jolt/pkgs/asio.py +25 -0
  23. jolt/pkgs/autoconf.py +41 -0
  24. jolt/pkgs/automake.py +41 -0
  25. jolt/pkgs/b2.py +31 -0
  26. jolt/pkgs/boost.py +111 -0
  27. jolt/pkgs/boringssl.py +32 -0
  28. jolt/pkgs/busybox.py +39 -0
  29. jolt/pkgs/bzip2.py +43 -0
  30. jolt/pkgs/cares.py +29 -0
  31. jolt/pkgs/catch2.py +36 -0
  32. jolt/pkgs/cbindgen.py +17 -0
  33. jolt/pkgs/cista.py +19 -0
  34. jolt/pkgs/clang.py +44 -0
  35. jolt/pkgs/cli11.py +23 -0
  36. jolt/pkgs/cmake.py +48 -0
  37. jolt/pkgs/cpython.py +196 -0
  38. jolt/pkgs/crun.py +29 -0
  39. jolt/pkgs/curl.py +38 -0
  40. jolt/pkgs/dbus.py +18 -0
  41. jolt/pkgs/double_conversion.py +24 -0
  42. jolt/pkgs/fastfloat.py +21 -0
  43. jolt/pkgs/ffmpeg.py +28 -0
  44. jolt/pkgs/flatbuffers.py +29 -0
  45. jolt/pkgs/fmt.py +27 -0
  46. jolt/pkgs/fstree.py +20 -0
  47. jolt/pkgs/gflags.py +18 -0
  48. jolt/pkgs/glib.py +18 -0
  49. jolt/pkgs/glog.py +25 -0
  50. jolt/pkgs/glslang.py +21 -0
  51. jolt/pkgs/golang.py +16 -11
  52. jolt/pkgs/googlebenchmark.py +18 -0
  53. jolt/pkgs/googletest.py +46 -0
  54. jolt/pkgs/gperf.py +15 -0
  55. jolt/pkgs/grpc.py +73 -0
  56. jolt/pkgs/hdf5.py +19 -0
  57. jolt/pkgs/help2man.py +14 -0
  58. jolt/pkgs/inja.py +28 -0
  59. jolt/pkgs/jsoncpp.py +31 -0
  60. jolt/pkgs/libarchive.py +43 -0
  61. jolt/pkgs/libcap.py +44 -0
  62. jolt/pkgs/libdrm.py +44 -0
  63. jolt/pkgs/libedit.py +42 -0
  64. jolt/pkgs/libevent.py +31 -0
  65. jolt/pkgs/libexpat.py +27 -0
  66. jolt/pkgs/libfastjson.py +21 -0
  67. jolt/pkgs/libffi.py +16 -0
  68. jolt/pkgs/libglvnd.py +30 -0
  69. jolt/pkgs/libogg.py +28 -0
  70. jolt/pkgs/libpciaccess.py +18 -0
  71. jolt/pkgs/libseccomp.py +21 -0
  72. jolt/pkgs/libtirpc.py +24 -0
  73. jolt/pkgs/libtool.py +42 -0
  74. jolt/pkgs/libunwind.py +35 -0
  75. jolt/pkgs/libva.py +18 -0
  76. jolt/pkgs/libvorbis.py +33 -0
  77. jolt/pkgs/libxml2.py +35 -0
  78. jolt/pkgs/libxslt.py +17 -0
  79. jolt/pkgs/libyajl.py +16 -0
  80. jolt/pkgs/llvm.py +81 -0
  81. jolt/pkgs/lua.py +54 -0
  82. jolt/pkgs/lz4.py +26 -0
  83. jolt/pkgs/m4.py +14 -0
  84. jolt/pkgs/make.py +17 -0
  85. jolt/pkgs/mesa.py +81 -0
  86. jolt/pkgs/meson.py +17 -0
  87. jolt/pkgs/mstch.py +28 -0
  88. jolt/pkgs/mysql.py +60 -0
  89. jolt/pkgs/nasm.py +49 -0
  90. jolt/pkgs/ncurses.py +30 -0
  91. jolt/pkgs/ng_log.py +25 -0
  92. jolt/pkgs/ninja.py +45 -0
  93. jolt/pkgs/nlohmann_json.py +25 -0
  94. jolt/pkgs/nodejs.py +19 -11
  95. jolt/pkgs/opencv.py +24 -0
  96. jolt/pkgs/openjdk.py +26 -0
  97. jolt/pkgs/openssl.py +103 -0
  98. jolt/pkgs/paho.py +76 -0
  99. jolt/pkgs/patchelf.py +16 -0
  100. jolt/pkgs/perl.py +42 -0
  101. jolt/pkgs/pkgconfig.py +64 -0
  102. jolt/pkgs/poco.py +39 -0
  103. jolt/pkgs/protobuf.py +77 -0
  104. jolt/pkgs/pugixml.py +27 -0
  105. jolt/pkgs/python.py +19 -0
  106. jolt/pkgs/qt.py +35 -0
  107. jolt/pkgs/rapidjson.py +26 -0
  108. jolt/pkgs/rapidyaml.py +28 -0
  109. jolt/pkgs/re2.py +30 -0
  110. jolt/pkgs/re2c.py +17 -0
  111. jolt/pkgs/readline.py +15 -0
  112. jolt/pkgs/rust.py +41 -0
  113. jolt/pkgs/sdl.py +28 -0
  114. jolt/pkgs/simdjson.py +27 -0
  115. jolt/pkgs/soci.py +46 -0
  116. jolt/pkgs/spdlog.py +29 -0
  117. jolt/pkgs/spirv_llvm.py +21 -0
  118. jolt/pkgs/spirv_tools.py +24 -0
  119. jolt/pkgs/sqlite.py +83 -0
  120. jolt/pkgs/ssl.py +12 -0
  121. jolt/pkgs/texinfo.py +15 -0
  122. jolt/pkgs/tomlplusplus.py +22 -0
  123. jolt/pkgs/wayland.py +26 -0
  124. jolt/pkgs/x11.py +58 -0
  125. jolt/pkgs/xerces_c.py +20 -0
  126. jolt/pkgs/xorg.py +360 -0
  127. jolt/pkgs/xz.py +29 -0
  128. jolt/pkgs/yamlcpp.py +30 -0
  129. jolt/pkgs/zeromq.py +47 -0
  130. jolt/pkgs/zlib.py +69 -0
  131. jolt/pkgs/zstd.py +33 -0
  132. jolt/plugins/alias.py +3 -0
  133. jolt/plugins/allure.py +5 -2
  134. jolt/plugins/autotools.py +66 -0
  135. jolt/plugins/cache.py +133 -0
  136. jolt/plugins/cmake.py +74 -6
  137. jolt/plugins/conan.py +238 -0
  138. jolt/plugins/cxx.py +698 -0
  139. jolt/plugins/cxxinfo.py +7 -0
  140. jolt/plugins/dashboard.py +1 -1
  141. jolt/plugins/docker.py +91 -23
  142. jolt/plugins/email.py +5 -2
  143. jolt/plugins/email.xslt +144 -101
  144. jolt/plugins/environ.py +11 -0
  145. jolt/plugins/fetch.py +141 -0
  146. jolt/plugins/gdb.py +44 -21
  147. jolt/plugins/gerrit.py +1 -14
  148. jolt/plugins/git.py +316 -101
  149. jolt/plugins/googletest.py +522 -1
  150. jolt/plugins/http.py +36 -38
  151. jolt/plugins/libtool.py +63 -0
  152. jolt/plugins/linux.py +990 -0
  153. jolt/plugins/logstash.py +4 -4
  154. jolt/plugins/meson.py +61 -0
  155. jolt/plugins/ninja-compdb.py +107 -31
  156. jolt/plugins/ninja.py +929 -134
  157. jolt/plugins/paths.py +11 -1
  158. jolt/plugins/pkgconfig.py +219 -0
  159. jolt/plugins/podman.py +148 -91
  160. jolt/plugins/python.py +137 -0
  161. jolt/plugins/remote_execution/__init__.py +0 -0
  162. jolt/plugins/remote_execution/administration_pb2.py +46 -0
  163. jolt/plugins/remote_execution/administration_pb2_grpc.py +170 -0
  164. jolt/plugins/remote_execution/log_pb2.py +32 -0
  165. jolt/plugins/remote_execution/log_pb2_grpc.py +68 -0
  166. jolt/plugins/remote_execution/scheduler_pb2.py +41 -0
  167. jolt/plugins/remote_execution/scheduler_pb2_grpc.py +141 -0
  168. jolt/plugins/remote_execution/worker_pb2.py +38 -0
  169. jolt/plugins/remote_execution/worker_pb2_grpc.py +112 -0
  170. jolt/plugins/report.py +12 -2
  171. jolt/plugins/rust.py +25 -0
  172. jolt/plugins/scheduler.py +710 -0
  173. jolt/plugins/selfdeploy/setup.py +9 -4
  174. jolt/plugins/selfdeploy.py +138 -88
  175. jolt/plugins/strings.py +35 -22
  176. jolt/plugins/symlinks.py +26 -11
  177. jolt/plugins/telemetry.py +5 -2
  178. jolt/plugins/timeline.py +13 -3
  179. jolt/plugins/volume.py +46 -48
  180. jolt/scheduler.py +591 -191
  181. jolt/tasks.py +1783 -245
  182. jolt/templates/export.sh.template +12 -6
  183. jolt/templates/timeline.html.template +44 -47
  184. jolt/timer.py +22 -0
  185. jolt/tools.py +749 -302
  186. jolt/utils.py +245 -18
  187. jolt/version.py +1 -1
  188. jolt/version_utils.py +2 -2
  189. jolt/xmldom.py +12 -2
  190. {jolt-0.9.76.dist-info → jolt-0.9.429.dist-info}/METADATA +98 -38
  191. jolt-0.9.429.dist-info/RECORD +207 -0
  192. {jolt-0.9.76.dist-info → jolt-0.9.429.dist-info}/WHEEL +1 -1
  193. jolt/plugins/amqp.py +0 -834
  194. jolt/plugins/debian.py +0 -338
  195. jolt/plugins/ftp.py +0 -181
  196. jolt/plugins/ninja-cache.py +0 -64
  197. jolt/plugins/ninjacli.py +0 -271
  198. jolt/plugins/repo.py +0 -253
  199. jolt-0.9.76.dist-info/RECORD +0 -79
  200. {jolt-0.9.76.dist-info → jolt-0.9.429.dist-info}/entry_points.txt +0 -0
  201. {jolt-0.9.76.dist-info → jolt-0.9.429.dist-info}/top_level.txt +0 -0
jolt/scheduler.py CHANGED
@@ -1,19 +1,21 @@
1
1
  from concurrent.futures import ThreadPoolExecutor, as_completed, Future
2
+ import copy
3
+ from functools import wraps
2
4
  import os
3
5
  import queue
6
+ from threading import Lock
4
7
 
8
+ from jolt import common_pb2 as common_pb
5
9
  from jolt import config
6
10
  from jolt import hooks
7
11
  from jolt import log
8
12
  from jolt import utils
9
13
  from jolt import tools
10
- from jolt.error import raise_error
11
14
  from jolt.error import raise_task_error
12
15
  from jolt.error import raise_task_error_if
13
16
  from jolt.graph import PruneStrategy
14
- from jolt.manifest import ManifestExtension
15
- from jolt.manifest import ManifestExtensionRegistry
16
17
  from jolt.options import JoltOptions
18
+ from jolt.timer import Timer
17
19
 
18
20
 
19
21
  class JoltEnvironment(object):
@@ -22,109 +24,288 @@ class JoltEnvironment(object):
22
24
 
23
25
 
24
26
  class TaskQueue(object):
25
- def __init__(self, strategy):
27
+ """
28
+ A helper class for tracking tasks in progress and their completion.
29
+ """
30
+
31
+ def __init__(self):
26
32
  self.futures = {}
27
- self.strategy = strategy
33
+ self.futures_lock = Lock()
28
34
  self.duration_acc = utils.duration_diff(0)
29
35
  self._aborted = False
36
+ self._timer = Timer(60, self._log_task_running_time)
37
+ self._timer.start()
38
+
39
+ def _log_task_running_time(self):
40
+ with self.futures_lock:
41
+ for future in self.futures:
42
+ self.futures[future].task.log_running_time()
43
+
44
+ def submit(self, executor):
45
+ """
46
+ Submit an executor to the task queue for execution.
47
+
48
+ The method schedules the executor for execution and returns a Future object
49
+ that may be used to track completion of the task.
50
+ """
30
51
 
31
- def submit(self, cache, task):
32
52
  if self._aborted:
33
53
  return None
34
54
 
35
- env = JoltEnvironment(cache=cache)
36
- executor = self.strategy.create_executor(task)
37
- raise_task_error_if(
38
- not executor, task,
39
- "no executor can execute the task; "
40
- "requesting a distributed network build without proper configuration?")
41
-
42
- task.set_in_progress()
43
- future = executor.submit(env)
44
- self.futures[future] = task
55
+ env = JoltEnvironment(queue=self)
56
+ future = executor.schedule(env)
57
+ with self.futures_lock:
58
+ self.futures[future] = executor
45
59
  return future
46
60
 
47
61
  def wait(self):
62
+ """
63
+ Wait for any task to complete.
64
+
65
+ The method waits for the next task to complete and returns the task and any
66
+ exception that may have occurred during execution. If no task is in progress,
67
+ the method returns None, None.
68
+ """
69
+
48
70
  for future in as_completed(self.futures):
49
- task = self.futures[future]
71
+ task = self.futures[future].task
50
72
  try:
51
73
  future.result()
52
74
  except Exception as error:
53
- log.exception()
54
75
  return task, error
55
76
  finally:
56
77
  self.duration_acc += task.duration_running or 0
57
- del self.futures[future]
78
+ with self.futures_lock:
79
+ del self.futures[future]
58
80
  return task, None
59
81
  return None, None
60
82
 
61
83
  def abort(self):
84
+ """
85
+ Abort all tasks in progress.
86
+
87
+ The method cancels all tasks in progress and prevents any new tasks from being
88
+ submitted to the task queue. The method doesn't wait for all tasks to complete
89
+ before returning.
90
+ """
62
91
  self._aborted = True
63
- for future, task in self.futures.items():
64
- future.cancel()
65
- if len(self.futures):
66
- log.info("Waiting for tasks to finish, please be patient")
67
- self.strategy.executors.shutdown()
92
+ with self.futures_lock:
93
+ for future, executor in self.futures.items():
94
+ executor.cancel()
95
+ future.cancel()
96
+ if len(self.futures):
97
+ log.info("Waiting for tasks to finish, please be patient")
98
+ self._timer.cancel()
99
+
100
+ def shutdown(self):
101
+ """
102
+ Shutdown the task queue.
103
+ """
104
+ self._timer.cancel()
68
105
 
69
106
  def is_aborted(self):
107
+ """ Returns true if the task queue has been aborted. """
70
108
  return self._aborted
71
109
 
72
110
  def in_progress(self, task):
73
- return task in self.futures.values()
111
+ """ Returns true if the task is in progress. """
112
+ with self.futures_lock:
113
+ return task in self.futures.values()
114
+
115
+ def empty(self):
116
+ """ Returns true if the task queue is empty. """
117
+ with self.futures_lock:
118
+ return len(self.futures) == 0
74
119
 
75
120
 
76
121
  class Executor(object):
122
+ """
123
+ Base class for all executors.
124
+
125
+ An executor is responsible for running a task. It is created by an executor
126
+ factory and is submitted to a task queue. The factory is also
127
+ responsible for hosting a thread pool that will run the executors it creates.
128
+
129
+ The type of executor created by the factory depends on the execution strategy
130
+ selected by the user through command line options. The strategy is responsible
131
+ for deciding which executor to create for each task.
132
+
133
+ An implementation of an executor must implement the run method, which is called
134
+ from the thread pool. The run method is responsible for running the task and
135
+ handling any exceptions that may occur during execution.
136
+ """
137
+
77
138
  def __init__(self, factory):
78
139
  self.factory = factory
79
140
 
80
- def submit(self, env):
141
+ def schedule(self, env):
142
+ """ Schedule the task for execution.
143
+
144
+ This method is called by the task queue to schedule the task for execution
145
+ in the factory thread pool. The method must return a Future object that
146
+ represents the task execution. The Future object is used to track the
147
+ execution of the task and to retrieve the result of the execution
148
+ once it is completed.
149
+
150
+ The method must be implemented by all executors. They must call the
151
+ factory submit method to schedule the task for execution and also
152
+ mark the task as in progress with set_in_progress().
153
+
154
+ Args:
155
+ env: The JoltEnvironment object that contains the queue and cache objects.
156
+
157
+ """
81
158
  return self.factory.submit(self, env)
82
159
 
160
+ def cancel(self):
161
+ """
162
+ Cancel the task.
163
+
164
+ This method is optional and may be implemented by executors that support
165
+ cancellation of tasks, such as network executors where a remote scheduler
166
+ may be able to cancel a task that is already running.
167
+
168
+ By default, the method does nothing.
169
+ """
170
+ pass
171
+
83
172
  def is_aborted(self):
173
+ """ Check if executor has been aborted. """
84
174
  return self.factory.is_aborted()
85
175
 
86
176
  def run(self, env):
87
- pass
177
+ """
178
+ Run the task.
179
+
180
+ This method must be implemented by all executors. It is called from the
181
+ factory thread pool and is responsible for running the task
182
+ and handling any exceptions that may occur during execution.
183
+ Any exceptions raised by the task must, if caught, be re-raised to
184
+ the caller unless the task is marked as unstable, in which case the
185
+ exception should be logged and ignored.
186
+
187
+ The task run() method shall be run within a hooks.task_run()
188
+ context manager to ensure that the task status is recognized by
189
+ the report hooks and other plugins.
190
+
191
+ Network executors have additional requirements. See the
192
+ NetworkExecutor class for more information.
193
+ """
194
+ raise NotImplementedError
88
195
 
89
196
 
90
197
  class LocalExecutor(Executor):
198
+ """
199
+ An Executor that runs a task locally.
200
+
201
+ The executor runs the task on the local machine. The task is run
202
+ by calling the task.run() method.
203
+
204
+ The executor is created by the local executor factory and is
205
+ typically run sequentially with other executors.
206
+ """
207
+
91
208
  def __init__(self, factory, task, force_upload=False, force_build=False):
92
- super(LocalExecutor, self).__init__(factory)
209
+ super().__init__(factory)
93
210
  self.task = task
94
211
  self.force_build = force_build
95
212
  self.force_upload = force_upload
96
213
 
97
- def run(self, env):
214
+ def schedule(self, env):
215
+ """
216
+ Schedule the task for execution.
217
+
218
+ The task is marked as in progress before scheduling.
219
+ """
220
+ self.task.set_in_progress()
221
+ return super().schedule(env)
222
+
223
+ def _run(self, env, task):
98
224
  if self.is_aborted():
99
225
  return
100
226
  try:
101
- self.task.started()
102
- hooks.task_started_execution(self.task)
103
- with hooks.task_run(self.task):
227
+ with hooks.task_run(task):
104
228
  self.task.run(
105
- env.cache,
229
+ env,
106
230
  force_build=self.force_build,
107
231
  force_upload=self.force_upload)
232
+
108
233
  except Exception as e:
109
- log.exception()
110
- self.task.failed()
111
- raise e
112
- else:
113
- hooks.task_finished_execution(self.task)
114
- self.task.finished()
115
- return self.task
234
+ log.exception(e, error=False)
235
+ if not task.is_unstable:
236
+ self.task.raise_for_status(log_error=getattr(env, "worker", False))
237
+ raise e
238
+
239
+ def get_all_extensions(self, task):
240
+ extensions = copy.copy(task.extensions)
241
+ for ext in extensions:
242
+ extensions.extend(self.get_all_extensions(ext))
243
+ return extensions
244
+
245
+ def run(self, env):
246
+ tasks = [self.task] + self.get_all_extensions(self.task)
247
+ for task in tasks:
248
+ task.queued()
249
+
250
+ self._run(env, self.task)
116
251
 
117
252
 
118
253
  class NetworkExecutor(Executor):
119
- pass
254
+ def run(self, env):
255
+ """
256
+ Run the task.
257
+
258
+ See the Executor class for basic information.
259
+
260
+ Network executors have additional requirements. Before scheduling
261
+ the task to a remote scheduler, the executor must call
262
+ run_resources() on the task. This acquires any Resources marked
263
+ local=True and uploads the resulting session artifacts
264
+ to the remote cache.
265
+
266
+ Once the task has been submitted to the remote scheduler, the executor
267
+ must run task.queued() on the task and its extensions. This is done
268
+ to ensure that the task status is correctly reported to the
269
+ user.
270
+
271
+ For any change in state of task, the executor must run one of:
272
+
273
+ - task.running_execution(remote=True) - when the task has started
274
+ - task.failed_execution(remote=True) - when the task has failed
275
+ - task.failed_execution(remote=True, interrupt=True) - when the
276
+ task has been interrupted, e.g. by a user request or rescheduling
277
+ - task.finished_execution(remote=True) - when the task has passed
278
+
279
+ Upon completion of the task, whether successful or not, task
280
+ session artifacts must be downloaded to the local cache, if
281
+ the task is marked as downloadable. This is done by calling
282
+ task.download() with the session_only flag set to True.
283
+
284
+ Persistent artifacts are downloaded only if the task is successful
285
+ and the task is marked as downloadable.
286
+ """
287
+ raise NotImplementedError
120
288
 
121
289
 
122
290
  class SkipTask(Executor):
291
+ """
292
+ An Executor that skips a task.
293
+
294
+ This executor is created by the concurrent executor factory when a task
295
+ is skipped, i.e. when the task artifacts are already available locally or
296
+ remotely and the task does not need to be run.
297
+ """
298
+
123
299
  def __init__(self, factory, task, *args, **kwargs):
124
- super(SkipTask, self).__init__(factory, *args, **kwargs)
300
+ super().__init__(factory, *args, **kwargs)
125
301
  self.task = task
126
302
 
127
303
  def run(self, env):
304
+ """
305
+ Skip the task.
306
+
307
+ The task and its extensions are marked as skipped.
308
+ """
128
309
  self.task.skipped()
129
310
  for ext in self.task.extensions:
130
311
  ext.skipped()
@@ -132,157 +313,224 @@ class SkipTask(Executor):
132
313
 
133
314
 
134
315
  class Downloader(Executor):
316
+ """
317
+ An Executor that downloads task artifacts.
318
+
319
+ The executor downloads the task artifacts and its extensions from the
320
+ remote cache to the local cache. Failure to download the artifacts
321
+ is reported by raising an exception.
322
+
323
+ Downloader executors are typically run in parallel with other executors.
324
+
325
+ """
135
326
  def __init__(self, factory, task, *args, **kwargs):
136
- super(Downloader, self).__init__(factory, *args, **kwargs)
327
+ super().__init__(factory, *args, **kwargs)
137
328
  self.task = task
138
329
 
139
- def _download(self, env, task):
330
+ def schedule(self, env):
331
+ """
332
+ Schedule the task for execution.
333
+
334
+ The task is marked as in progress before scheduling.
335
+ """
336
+ self.task.set_in_progress()
337
+ return super().schedule(env)
338
+
339
+ def _download(self, task):
140
340
  if self.is_aborted():
141
341
  return
142
342
  if not task.is_downloadable():
143
343
  return
144
344
  try:
145
- task.started("Download")
146
- hooks.task_started_download(task)
345
+ task.started_download()
147
346
  raise_task_error_if(
148
- not env.cache.download(task),
347
+ not task.download(persistent_only=True),
149
348
  task, "Failed to download task artifact")
150
349
  except Exception as e:
151
350
  with task.task.report() as report:
152
351
  report.add_exception(e)
153
- task.failed("Download")
352
+ task.failed_download()
154
353
  raise e
155
354
  else:
156
- hooks.task_finished_download(task)
157
- task.finished("Download")
355
+ task.finished_download()
158
356
 
159
357
  def run(self, env):
160
- self._download(env, self.task)
358
+ """ Downloads artifacts. """
359
+
360
+ self._download(self.task)
161
361
  for ext in self.task.extensions:
162
- self._download(env, ext)
362
+ self._download(ext)
163
363
  return self.task
164
364
 
165
365
 
166
366
  class Uploader(Executor):
367
+ """
368
+ An Executor that uploads task artifacts.
369
+
370
+ The executor uploads the task artifacts and its extensions from the
371
+ local cache to the remote cache. Failure to upload the artifacts
372
+ is reported by raising an exception.
373
+
374
+ Uploader executors are typically run in parallel with other executors.
375
+ """
376
+
167
377
  def __init__(self, factory, task, *args, **kwargs):
168
- super(Uploader, self).__init__(factory, *args, **kwargs)
378
+ super().__init__(factory, *args, **kwargs)
169
379
  self.task = task
170
380
 
171
- def _upload(self, env, task):
381
+ def schedule(self, env):
382
+ """
383
+ Schedule the task for execution.
384
+
385
+ The task is marked as in progress before scheduling.
386
+ """
387
+ self.task.set_in_progress()
388
+ return super().schedule(env)
389
+
390
+ def _upload(self, task):
172
391
  if self.is_aborted():
173
392
  return
174
393
  try:
175
- task.started("Upload")
176
- hooks.task_started_upload(task)
394
+ task.started_upload()
177
395
  raise_task_error_if(
178
- not env.cache.upload(task),
396
+ not task.upload(persistent_only=True),
179
397
  task, "Failed to upload task artifact")
180
398
  except Exception as e:
181
399
  with task.task.report() as report:
182
400
  report.add_exception(e)
183
- task.failed("Upload")
401
+ task.failed_upload()
184
402
  raise e
185
403
  else:
186
- hooks.task_finished_upload(task)
187
- task.finished("Upload")
404
+ task.finished_upload()
188
405
 
189
406
  def run(self, env):
190
- self._upload(env, self.task)
407
+ """ Uploads artifacts. """
408
+
409
+ self._upload(self.task)
191
410
  for ext in self.task.extensions:
192
- self._upload(env, ext)
411
+ self._upload(ext)
193
412
 
194
413
  return self.task
195
414
 
196
415
 
197
416
  @utils.Singleton
198
417
  class ExecutorRegistry(object):
418
+ """
419
+ The ExecutorRegistry is responsible for creating executors.
420
+
421
+ The types of executors that are possible to create are:
422
+
423
+ - create_local: Runs tasks locally.
424
+ - create_network: Schedules tasks for remote execution.
425
+ - create_downloader: Downloads task artifacts.
426
+ - create_uploader: Uploads task artifacts.
427
+ - create_skipper: Skips tasks.
428
+
429
+ The registry utilizes different ExecutorFactory objects to create executors. Plugins
430
+ can register their own NetworkExecutorFactory objects with the help of the
431
+ ExecutorFactory.Register decorator.
432
+ """
433
+
199
434
  executor_factories = []
200
- extension_factories = []
201
435
 
202
436
  def __init__(self, options=None):
203
437
  self._options = options or JoltOptions()
204
438
  self._factories = [factory(self._options) for factory in self.__class__.executor_factories]
205
439
  self._local_factory = LocalExecutorFactory(self._options)
206
440
  self._concurrent_factory = ConcurrentLocalExecutorFactory(self._options)
207
- self._extensions = [factory().create() for factory in self.__class__.extension_factories]
208
441
 
209
442
  def shutdown(self):
443
+ """ Shuts all executor factories and thread-pools down """
444
+
210
445
  for factory in self._factories:
211
446
  factory.shutdown()
212
447
  self._local_factory.shutdown()
213
448
  self._concurrent_factory.shutdown()
214
449
 
450
+ def create_session(self, graph):
451
+ """ Creates a session for all factories. """
452
+ return {factory: factory.create_session(graph) for factory in self._factories}
453
+
215
454
  def create_skipper(self, task):
455
+ """ Creates an executor that skips a task. """
216
456
  return SkipTask(self._concurrent_factory, task)
217
457
 
218
458
  def create_downloader(self, task):
219
- # TODO: Switch to concurrent factory once the progress bar can handle it
459
+ """ Creates an executor that downloads task artifacts. """
220
460
  return Downloader(self._concurrent_factory, task)
221
461
 
222
462
  def create_uploader(self, task):
223
- # TODO: Switch to concurrent factory once the progress bar can handle it
463
+ """ Creates an executor that uploads task artifacts. """
224
464
  return Uploader(self._concurrent_factory, task)
225
465
 
226
466
  def create_local(self, task, force=False):
467
+ """ Creates an executor that runs a task locally. """
227
468
  task.set_locally_executed()
228
469
  return self._local_factory.create(task, force=force)
229
470
 
230
- def create_network(self, task):
471
+ def create_network(self, session, task):
472
+ """
473
+ Creates an executor that schedules a task for remote execution.
474
+
475
+ All registered network executor factories are queried to create an executor.
476
+ The first factory that can create an executor is used. If no factory is able
477
+ to create an executor, a local executor is created as fallback.
478
+ """
479
+
231
480
  for factory in self._factories:
232
- executor = factory.create(task)
481
+ executor = factory.create(session[factory], task)
233
482
  if executor is not None:
234
483
  task.set_remotely_executed()
235
484
  return executor
236
485
  return self.create_local(task)
237
486
 
238
- def get_network_parameters(self, task):
239
- parameters = {}
240
- for extension in self._extensions:
241
- parameters.update(extension.get_parameters(task))
242
- return parameters
243
-
244
-
245
- class NetworkExecutorExtensionFactory(object):
246
- @staticmethod
247
- def Register(cls):
248
- ExecutorRegistry.extension_factories.insert(0, cls)
249
- return cls
250
-
251
- def create(self):
252
- raise NotImplementedError()
253
487
 
488
+ class ExecutorFactory(object):
489
+ """
490
+ The ExecutorFactory class is responsible for creating executors.
254
491
 
255
- class NetworkExecutorExtension(object):
256
- def get_parameters(self, task):
257
- return {}
258
-
492
+ The factory is responsible for creating executors that run tasks. The factory
493
+ is also responsible for hosting a thread pool that will run the executors it creates.
259
494
 
260
- class Job(object):
261
- def __init__(self, priority, future, executor, env):
262
- self.priority = priority
263
- self.future = future
264
- self.executor = executor
265
- self.env = env
495
+ """
496
+ class QueueItem(object):
497
+ """
498
+ The type of item that is put into the queue thread-pool queue.
266
499
 
267
- def __le__(self, o):
268
- return self.priority <= o.priority
500
+ It wraps the executor and its priority.
501
+ """
502
+ def __init__(self, priority: int, future: Future, executor: Executor, env: JoltEnvironment):
503
+ self.priority = priority
504
+ self.future = future
505
+ self.executor = executor
506
+ self.env = env
269
507
 
270
- def __ge__(self, o):
271
- return self.priority >= o.priority
508
+ def __le__(self, o):
509
+ return self.priority <= o.priority
272
510
 
273
- def __lt__(self, o):
274
- return self.priority < o.priority
511
+ def __ge__(self, o):
512
+ return self.priority >= o.priority
275
513
 
276
- def __gt__(self, o):
277
- return self.priority > o.priority
514
+ def __lt__(self, o):
515
+ return self.priority < o.priority
278
516
 
279
- def __eq__(self, o):
280
- return self.priority == o.priority
517
+ def __gt__(self, o):
518
+ return self.priority > o.priority
281
519
 
520
+ def __eq__(self, o):
521
+ return self.priority == o.priority
282
522
 
283
- class ExecutorFactory(object):
284
523
  @staticmethod
285
524
  def Register(cls):
525
+ """
526
+ Decorator to register an executor factory.
527
+
528
+ The decorator is used by plugins that wish to register their own
529
+ executor factories. Such factories are used by the ExecutorRegistry
530
+ to create executors for tasks, as determined by the execution strategy
531
+ selected by the user.
532
+ """
533
+
286
534
  ExecutorRegistry.executor_factories.insert(0, cls)
287
535
  return cls
288
536
 
@@ -293,59 +541,101 @@ class ExecutorFactory(object):
293
541
  self._options = options or JoltOptions()
294
542
 
295
543
  def is_aborted(self):
544
+ """ Returns true if the build and thus the factory has been aborted. """
296
545
  return self._aborted
297
546
 
298
547
  def is_keep_going(self):
548
+ """ Returns true if the build should continue even if a task fails. """
299
549
  return self._options.keep_going
300
550
 
301
551
  def shutdown(self):
552
+ """
553
+ Called to shutdown the factory and its thread-pool.
554
+
555
+ The method is called when the build is complete or when the build is aborted.
556
+ After the method is called, no more tasks can be submitted to the factory and
557
+ the is_aborted() method will return True.
558
+ """
302
559
  self._aborted = True
303
560
  self.pool.shutdown()
304
561
 
305
562
  def create(self, task):
563
+ """
564
+ Create an executor for the provided task.
565
+
566
+ Must be implemented by all executor factories. The method must return
567
+ an executor that is capable of running the task. The executor must be
568
+ created with the factory as its parent so that it can be submitted to
569
+ the correct thread-pool for execution.
570
+ """
306
571
  raise NotImplementedError()
307
572
 
308
573
  def _run(self):
309
- job = self._queue.get(False)
574
+ item = self._queue.get(False)
310
575
  self._queue.task_done()
311
576
  try:
312
577
  if not self.is_aborted():
313
- job.executor.run(job.env)
578
+ item.executor.run(item.env)
314
579
  except KeyboardInterrupt as e:
315
- raise_error("Interrupted by user")
316
580
  self._aborted = True
317
- job.future.set_exception(e)
581
+ item.future.set_exception(e)
318
582
  except Exception as e:
319
583
  if not self.is_keep_going():
320
584
  self._aborted = True
321
- job.future.set_exception(e)
585
+ item.future.set_exception(e)
322
586
  else:
323
- job.future.set_result(job.executor)
587
+ item.future.set_result(item.executor)
324
588
 
325
589
  def submit(self, executor, env):
590
+ """
591
+ Submit an executor to the thread-pool for execution.
592
+
593
+ The method submits the executor to the thread-pool for execution. The executor
594
+ is wrapped in a Future object that is returned to the caller. The Future object
595
+ is used to track the execution of the task and to retrieve the result of the
596
+ execution once it is completed.
597
+ """
326
598
  future = Future()
327
- self._queue.put(Job(-executor.task.weight, future, executor, env))
599
+ self._queue.put(ExecutorFactory.QueueItem(-executor.task.weight, future, executor, env))
328
600
  self.pool.submit(self._run)
329
601
  return future
330
602
 
331
603
 
332
604
  class LocalExecutorFactory(ExecutorFactory):
605
+ """
606
+ Factory for creating local executors.
607
+
608
+ The factory creates executors that run tasks locally. Typically,
609
+ only one LocalExecutor is allowed to run at a time, unless the
610
+ user has specified a higher number of parallel tasks in the
611
+ configuration file or through command line options (-j).
612
+ """
613
+
333
614
  def __init__(self, options=None):
334
615
  max_workers = config.getint(
335
616
  "jolt", "parallel_tasks",
336
617
  os.getenv("JOLT_PARALLEL_TASKS", 1 if options is None else options.jobs))
337
- super(LocalExecutorFactory, self).__init__(
618
+ super().__init__(
338
619
  options=options,
339
620
  max_workers=max_workers)
340
621
 
341
622
  def create(self, task, force=False):
623
+ """ Create a LocalExecutor for the task. """
342
624
  return LocalExecutor(self, task, force_build=force)
343
625
 
344
626
 
345
627
  class ConcurrentLocalExecutorFactory(ExecutorFactory):
628
+ """
629
+ A shared factory for local executors that are allowed to run concurrently.
630
+
631
+ The factory cannot create any executors on its own. Instead, its executors
632
+ are created by the ExecutorRegistry. The factory thread-pool is then used to
633
+ run executors concurrently.
634
+ """
635
+
346
636
  def __init__(self, options=None):
347
637
  max_workers = tools.Tools().thread_count()
348
- super(ConcurrentLocalExecutorFactory, self).__init__(
638
+ super().__init__(
349
639
  options=options,
350
640
  max_workers=max_workers)
351
641
 
@@ -354,56 +644,131 @@ class ConcurrentLocalExecutorFactory(ExecutorFactory):
354
644
 
355
645
 
356
646
  class NetworkExecutorFactory(ExecutorFactory):
647
+ """
648
+ Base class for executors that schedule task executions remotely in a build cluster.
649
+ """
650
+
357
651
  def __init__(self, *args, **kwargs):
358
- super(NetworkExecutorFactory, self).__init__(*args, **kwargs)
652
+ super().__init__(*args, **kwargs)
653
+
654
+ def create(self, session, task):
655
+ raise NotImplementedError()
656
+
657
+
658
+ def ensure_executor_return(func):
659
+ """ Decorator to ensure that an executor is returned by factories. """
660
+
661
+ @wraps(func)
662
+ def wrapper(self, session, task):
663
+ executor = func(self, session, task)
664
+ raise_task_error_if(
665
+ not executor, task,
666
+ "no executor can execute the task; "
667
+ "requesting a distributed network build without proper configuration?")
668
+ return executor
669
+
670
+ return wrapper
359
671
 
360
672
 
361
673
  class ExecutionStrategy(object):
362
- def create_executor(self, task):
674
+ """
675
+ Base class for all execution strategies.
676
+
677
+ An execution strategy is responsible for deciding which executor to create for each task.
678
+ The decision is based on the type of task and the availability of the task's artifacts in
679
+ local and remote caches.
680
+
681
+ The strategy is also responsible for deciding if task requirements should be pruned
682
+ from the build graph. This is done to avoid processing tasks that are not needed for the build.
683
+
684
+ Strategies are selected by the user through command line options.
685
+
686
+ """
687
+ def create_executor(self, session, task):
688
+ """
689
+ Create an executor for the task.
690
+
691
+ The method must be implemented by all execution strategies. It is responsible for
692
+ creating an executor that is capable of running or processing the task. Creation
693
+ of an executor should be delegated to the ExecutorRegistry which has the knowledge
694
+ of all available executor factories.
695
+ """
696
+ raise NotImplementedError()
697
+
698
+ def should_prune_requirements(self, task):
699
+ """
700
+ Return True if the task requirements should be pruned from the build graph.
701
+
702
+ The method must be implemented by all execution strategies.
703
+ """
363
704
  raise NotImplementedError()
364
705
 
365
706
 
366
707
  class LocalStrategy(ExecutionStrategy, PruneStrategy):
708
+ """
709
+ Strategy for local builds.
710
+
711
+ By default, the strategy schedules tasks for local execution, unless the task
712
+ artifacts are available in the local cache. If available remotely, the strategy
713
+ will create a downloader executor to download the artifacts.
714
+ """
715
+
367
716
  def __init__(self, executors, cache):
368
717
  self.executors = executors
369
718
  self.cache = cache
370
719
 
371
- def create_executor(self, task):
372
- if task.is_alias():
720
+ @ensure_executor_return
721
+ def create_executor(self, session, task):
722
+ """ Create an executor for the task. """
723
+
724
+ if task.is_alias() or task.is_resource():
373
725
  return self.executors.create_skipper(task)
374
726
  if not task.is_cacheable():
375
727
  return self.executors.create_local(task)
376
- if task.is_available_locally(self.cache):
728
+ if task.is_available_locally():
377
729
  return self.executors.create_skipper(task)
378
- if self.cache.download_enabled() and task.is_available_remotely(self.cache):
730
+ if self.cache.download_enabled() and task.is_available_remotely():
379
731
  return self.executors.create_downloader(task)
380
732
  return self.executors.create_local(task)
381
733
 
382
734
  def should_prune_requirements(self, task):
735
+ """ Prune task requirements if possible """
736
+
383
737
  if task.is_alias() or not task.is_cacheable():
384
738
  return False
385
- if task.is_available_locally(self.cache):
739
+ if task.is_available_locally():
386
740
  return True
387
- if self.cache.download_enabled() and task.is_available_remotely(self.cache):
741
+ if self.cache.download_enabled() and task.is_available_remotely():
388
742
  return True
389
743
  return False
390
744
 
391
745
 
392
746
  class DownloadStrategy(ExecutionStrategy, PruneStrategy):
747
+ """
748
+ Strategy for downloading task artifacts.
749
+
750
+ The strategy is used when the user has requested that task artifacts be downloaded.
751
+ If the task artifacts are available in the local cache, the strategy will skip the
752
+ task. If the task artifacts are available in the remote cache, the strategy will
753
+ create a downloader executor to download the artifacts. If the task artifacts are
754
+ not available in either cache, the strategy reports an error.
755
+ """
756
+
393
757
  def __init__(self, executors, cache):
394
758
  self.executors = executors
395
759
  self.cache = cache
396
760
 
397
- def create_executor(self, task):
761
+ @ensure_executor_return
762
+ def create_executor(self, session, task):
398
763
  if task.is_alias():
399
764
  return self.executors.create_skipper(task)
400
765
  if task.is_resource():
401
766
  return self.executors.create_local(task)
402
767
  if not task.is_cacheable():
403
768
  return self.executors.create_skipper(task)
404
- if task.is_available_locally(self.cache):
769
+ if task.is_available_locally():
405
770
  return self.executors.create_skipper(task)
406
- if self.cache.download_enabled() and task.is_available_remotely(self.cache):
771
+ if self.cache.download_enabled() and task.is_available_remotely(cache=False):
407
772
  return self.executors.create_downloader(task)
408
773
  raise_task_error(task, "Task must be built first")
409
774
 
@@ -412,25 +777,33 @@ class DownloadStrategy(ExecutionStrategy, PruneStrategy):
412
777
 
413
778
 
414
779
  class DistributedStrategy(ExecutionStrategy, PruneStrategy):
780
+ """
781
+ Strategy for distributed network builds.
782
+
783
+ By default, the strategy schedules tasks for remote execution, if there is no
784
+ artifact available. Otherwise, artifacts are either uploaded or downloaded as
785
+ needed.
786
+ """
787
+
415
788
  def __init__(self, executors, cache):
416
789
  self.executors = executors
417
790
  self.cache = cache
418
791
 
419
- def create_executor(self, task):
420
- if task.is_alias():
792
+ @ensure_executor_return
793
+ def create_executor(self, session, task):
794
+ """ Create an executor for the task. """
795
+
796
+ if task.is_alias() or task.is_resource():
421
797
  return self.executors.create_skipper(task)
422
798
 
423
- if task.is_resource():
424
- if task.deps_available_locally(self.cache):
425
- return self.executors.create_local(task)
426
- else:
427
- return self.executors.create_skipper(task)
799
+ if task.is_local():
800
+ return self.executors.create_local(task)
428
801
 
429
802
  if not task.is_cacheable():
430
- return self.executors.create_network(task)
803
+ return self.executors.create_network(session, task)
431
804
 
432
805
  if not self.cache.upload_enabled():
433
- return self.executors.create_network(task)
806
+ return self.executors.create_network(session, task)
434
807
 
435
808
  if not task.is_goal(with_extensions=False):
436
809
  task.disable_download()
@@ -438,53 +811,64 @@ class DistributedStrategy(ExecutionStrategy, PruneStrategy):
438
811
  if not extension.is_goal(with_extensions=False):
439
812
  extension.disable_download()
440
813
 
441
- remote = task.is_available_remotely(self.cache)
814
+ remote = task.is_available_remotely()
442
815
  if remote:
443
816
  if task.is_goal() and self.cache.download_enabled() and \
444
- not task.is_available_locally(self.cache):
817
+ not task.is_available_locally():
445
818
  return self.executors.create_downloader(task)
446
819
  return self.executors.create_skipper(task)
447
820
  else:
448
- if task.is_available_locally(self.cache) and task.is_uploadable(self.cache):
821
+ if task.is_available_locally() and task.is_uploadable():
449
822
  return self.executors.create_uploader(task)
450
- if task.is_fast() and task.deps_available_locally(self.cache):
451
- return self.executors.create_local(task)
823
+ if task.is_fast() and task.deps_available_locally():
824
+ return self.executors.create_local(task, force=True)
452
825
 
453
- return self.executors.create_network(task)
826
+ return self.executors.create_network(session, task)
454
827
 
455
828
  def should_prune_requirements(self, task):
829
+ """ Prune task requirements if possible """
830
+
456
831
  if task.is_alias() or not task.is_cacheable():
457
832
  return False
458
- if task.is_available_remotely(self.cache):
833
+ if task.is_available_remotely():
459
834
  return True
460
835
  return False
461
836
 
462
837
 
463
838
  class WorkerStrategy(ExecutionStrategy, PruneStrategy):
839
+ """
840
+ Strategy for worker builds.
841
+
842
+ This strategy is used on workers when the user has requested a network build.
843
+ It is similar to the LocalStrategy in that it will run tasks locally if no
844
+ artifacts are available. However, if artifacts are available locally, the
845
+ strategy will upload them to the remote cache.
846
+ """
847
+
464
848
  def __init__(self, executors, cache):
465
849
  self.executors = executors
466
850
  self.cache = cache
467
851
 
468
- def create_executor(self, task):
469
- if task.is_resource():
470
- return self.executors.create_local(task)
852
+ @ensure_executor_return
853
+ def create_executor(self, session, task):
854
+ """ Create an executor for the task. """
471
855
 
472
- if task.is_alias():
856
+ if task.is_alias() or task.is_resource():
473
857
  return self.executors.create_skipper(task)
474
858
 
475
859
  raise_task_error_if(
476
860
  not self.cache.upload_enabled(), task,
477
- "artifact upload must be enabled for workers, fix configuration")
861
+ "Artifact upload must be enabled for workers, fix configuration")
478
862
 
479
863
  if not task.is_cacheable():
480
864
  return self.executors.create_local(task)
481
865
 
482
- if task.is_available_locally(self.cache):
483
- if task.is_goal() and not task.is_available_remotely(self.cache):
866
+ if task.is_available_locally():
867
+ if task.is_goal() and not task.is_available_remotely():
484
868
  # Unpacked artifacts may become unpacked before we manage to upload.
485
869
  # To keep the implementation simple we take the easy road and rebuild
486
870
  # all artifacts that have not been unpacked, even if they are uploadable.
487
- if task.is_unpacked(self.cache) and task.is_uploadable(self.cache):
871
+ if task.is_unpacked() and task.is_uploadable():
488
872
  return self.executors.create_uploader(task)
489
873
  else:
490
874
  return self.executors.create_local(task, force=True)
@@ -493,64 +877,80 @@ class WorkerStrategy(ExecutionStrategy, PruneStrategy):
493
877
  if not self.cache.download_enabled():
494
878
  return self.executors.create_local(task)
495
879
 
496
- if task.is_available_remotely(self.cache):
880
+ if task.is_available_remotely():
497
881
  return self.executors.create_downloader(task)
882
+ elif not task.is_goal():
883
+ raise_task_error(task, "Task artifact removed from global cache, cannot continue")
498
884
 
499
885
  return self.executors.create_local(task)
500
886
 
501
887
  def should_prune_requirements(self, task):
888
+ """ Prune task requirements if possible """
889
+
502
890
  if task.is_alias() or not task.is_cacheable():
503
891
  return False
504
- if task.is_available_locally(self.cache):
892
+ if task.is_available_locally():
505
893
  # Unpacked artifacts may become unpacked before we manage to upload.
506
894
  # To keep the implementation simple we take the easy road and rebuild
507
895
  # all artifacts that have not been unpacked, even if they are uploadable.
508
- if task.is_unpacked(self.cache) and task.is_uploadable(self.cache):
896
+ if task.is_unpacked() and task.is_uploadable():
509
897
  return True
510
898
  if not task.is_goal() and task.task.selfsustained:
511
899
  return True
512
900
  return False
513
901
 
514
902
 
515
- class TaskIdentityExtension(ManifestExtension):
516
- def export_manifest(self, manifest, task):
517
- for child in [task] + task.extensions + task.descendants:
518
- manifest_task = manifest.find_task(child.qualified_name)
519
- if manifest_task is None:
520
- manifest_task = manifest.create_task()
521
- manifest_task.name = child.qualified_name
522
- manifest_task.identity = child.identity
523
-
524
-
525
- ManifestExtensionRegistry.add(TaskIdentityExtension())
526
-
527
-
528
- class TaskExportExtension(ManifestExtension):
529
- def export_manifest(self, manifest, task):
530
- short_task_names = set()
531
- for child in [task] + task.extensions + task.descendants:
532
- manifest_task = manifest.find_task(child.qualified_name)
533
- if manifest_task is None:
534
- manifest_task = manifest.create_task()
535
- manifest_task.name = child.qualified_name
536
- for key, export in child.task._get_export_objects().items():
537
- attrib = manifest_task.create_attribute()
538
- attrib.name = key
539
- attrib.value = export.export(child.task)
540
- short_task_names.add(child.name)
541
-
542
- # Figure out if any task with an overridden default parameter
543
- # value was included in the manifest. If so, add info about it.
544
- default_task_names = set()
545
- for task in task.options.default:
546
- short_name, _ = utils.parse_task_name(task)
547
- if short_name in short_task_names:
548
- default_task_names.add(task)
549
- if default_task_names:
550
- build = manifest.create_build()
551
- for task in default_task_names:
552
- default = build.create_default()
553
- default.name = task
903
+ def get_exported_task_set(task):
904
+ children = [task] + task.descendants
905
+ for ext in task.extensions:
906
+ children.extend(get_exported_task_set(ext))
907
+ return list(set(children))
908
+
909
+
910
+ def export_tasks(tasks):
911
+ pb_tasks = {}
554
912
 
913
+ for task in tasks:
914
+ properties = []
915
+ for key, export in task.task._get_export_objects().items():
916
+ value = export.export(task.task)
917
+ if value is not None:
918
+ pb_attrib = common_pb.Property(key=key, value=str(value))
919
+ properties.append(pb_attrib)
920
+
921
+ platform = common_pb.Platform(
922
+ properties=[
923
+ common_pb.Property(key=key, value=value)
924
+ for key, value in task.task.platform.items()
925
+ ]
926
+ )
927
+
928
+ args = dict(
929
+ identity=task.identity,
930
+ instance=task.instance,
931
+ taint=str(task.task.taint),
932
+ name=task.short_qualified_name,
933
+ platform=platform,
934
+ properties=properties,
935
+ )
936
+
937
+ pb_tasks[task.exported_name] = common_pb.Task(**args)
938
+
939
+ return pb_tasks
940
+
941
+
942
+ def export_task_default_params(tasks):
943
+ default_task_names = {}
944
+
945
+ for task in tasks:
946
+ for task in task.options.default:
947
+ short_name, params = utils.parse_task_name(task)
948
+ if short_name in default_task_names:
949
+ default_task_names[short_name].update(params)
950
+ else:
951
+ default_task_names[short_name] = params
555
952
 
556
- ManifestExtensionRegistry.add(TaskExportExtension())
953
+ return [
954
+ utils.format_task_name(name, params)
955
+ for name, params in default_task_names.items()
956
+ ]