taskflow 5.7.0__py3-none-any.whl → 5.9.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- taskflow/engines/action_engine/engine.py +18 -6
- taskflow/engines/action_engine/process_executor.py +2 -0
- taskflow/jobs/backends/impl_etcd.py +610 -0
- taskflow/test.py +0 -26
- taskflow/tests/unit/action_engine/test_creation.py +9 -3
- taskflow/tests/unit/action_engine/test_process_executor.py +19 -12
- taskflow/tests/unit/jobs/test_etcd_job.py +421 -0
- taskflow/tests/unit/test_arguments_passing.py +6 -0
- taskflow/tests/unit/test_engines.py +6 -0
- taskflow/tests/unit/test_retries.py +6 -0
- taskflow/tests/unit/test_suspend.py +6 -0
- taskflow/tests/utils.py +10 -0
- {taskflow-5.7.0.dist-info → taskflow-5.9.0.dist-info}/AUTHORS +1 -0
- {taskflow-5.7.0.dist-info → taskflow-5.9.0.dist-info}/METADATA +7 -5
- {taskflow-5.7.0.dist-info → taskflow-5.9.0.dist-info}/RECORD +20 -18
- {taskflow-5.7.0.dist-info → taskflow-5.9.0.dist-info}/entry_points.txt +1 -0
- taskflow-5.9.0.dist-info/pbr.json +1 -0
- taskflow-5.7.0.dist-info/pbr.json +0 -1
- {taskflow-5.7.0.dist-info → taskflow-5.9.0.dist-info}/LICENSE +0 -0
- {taskflow-5.7.0.dist-info → taskflow-5.9.0.dist-info}/WHEEL +0 -0
- {taskflow-5.7.0.dist-info → taskflow-5.9.0.dist-info}/top_level.txt +0 -0
taskflow/engines/action_engine/engine.py

```diff
@@ -31,7 +31,6 @@ from oslo_utils import timeutils
 from taskflow.engines.action_engine import builder
 from taskflow.engines.action_engine import compiler
 from taskflow.engines.action_engine import executor
-from taskflow.engines.action_engine import process_executor
 from taskflow.engines.action_engine import runtime
 from taskflow.engines import base
 from taskflow import exceptions as exc
@@ -41,6 +40,11 @@ from taskflow import storage
 from taskflow.types import failure
 from taskflow.utils import misc
 
+try:
+    from taskflow.engines.action_engine import process_executor
+except ImportError:
+    process_executor = None
+
 LOG = logging.getLogger(__name__)
 
 
@@ -559,24 +563,32 @@ String (case insensitive) Executor used
     _executor_cls_matchers = [
         _ExecutorTypeMatch((futures.ThreadPoolExecutor,),
                            executor.ParallelThreadTaskExecutor),
-        _ExecutorTypeMatch((futures.ProcessPoolExecutor,),
-                           process_executor.ParallelProcessTaskExecutor),
+    ]
+    if process_executor is not None:
+        _executor_cls_matchers.append(
+            _ExecutorTypeMatch((futures.ProcessPoolExecutor,),
+                               process_executor.ParallelProcessTaskExecutor)
+        )
+    _executor_cls_matchers.append(
         _ExecutorTypeMatch((futures.Executor,),
                            executor.ParallelThreadTaskExecutor),
-    ]
+    )
 
     # One of these should match when a string/text is provided for the
     # 'executor' option (a mixed case equivalent is allowed since the match
     # will be lower-cased before checking).
     _executor_str_matchers = [
-        _ExecutorTextMatch(frozenset(['processes', 'process']),
-                           process_executor.ParallelProcessTaskExecutor),
         _ExecutorTextMatch(frozenset(['thread', 'threads', 'threaded']),
                            executor.ParallelThreadTaskExecutor),
         _ExecutorTextMatch(frozenset(['greenthread', 'greenthreads',
                                       'greenthreaded']),
                            executor.ParallelGreenThreadTaskExecutor),
     ]
+    if process_executor is not None:
+        _executor_str_matchers.append(
+            _ExecutorTextMatch(frozenset(['processes', 'process']),
+                               process_executor.ParallelProcessTaskExecutor)
+        )
 
     # Used when no executor is provided (either a string or object)...
     _default_executor_cls = executor.ParallelThreadTaskExecutor
```
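The net effect of the engine.py changes above is that `process_executor` becomes an optional import, and the process-based executor matchers are only registered when that import succeeds (per the new deprecation warning, it is disabled on Python 3.12 or later). The following is a minimal, hedged sketch of what that means for callers: it probes availability the same way the engine does and falls back to the thread executor. The flow and task here are invented for illustration; they are not part of the package diff.

```python
# Minimal sketch (not from the package): pick a parallel executor kind that
# is actually available, mirroring the optional import added in engine.py.
try:
    from taskflow.engines.action_engine import process_executor
except ImportError:  # e.g. Python 3.12+, where it is disabled
    process_executor = None

import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow import task


class Hello(task.Task):
    def execute(self):
        print("hello from a task")


flow = linear_flow.Flow("demo").add(Hello())

# The 'processes'/'process' string matcher is only registered when the
# process executor imported successfully; otherwise fall back to threads.
executor_kind = 'processes' if process_executor is not None else 'threads'
taskflow.engines.run(flow, engine='parallel', executor=executor_kind)
```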
taskflow/engines/action_engine/process_executor.py

```diff
@@ -580,6 +580,8 @@ class ParallelProcessTaskExecutor(base.ParallelTaskExecutor):
                  max_workers=None, wait_timeout=None):
         super(ParallelProcessTaskExecutor, self).__init__(
             executor=executor, max_workers=max_workers)
+        LOG.warning('Process task executor is deprecated. It is now disabled '
+                    'in Python 3.12 or later and will be removed.')
         self._auth_key = _create_random_string(32)
         self._dispatcher = Dispatcher({}, self._auth_key,
                                       _create_random_string(32))
```
taskflow/jobs/backends/impl_etcd.py (new file)

```diff
@@ -0,0 +1,610 @@
+# Copyright (C) Red Hat
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import threading
+import typing
+
+import etcd3gw
+import fasteners
+from oslo_serialization import jsonutils
+from oslo_utils import timeutils
+from oslo_utils import uuidutils
+
+from taskflow import exceptions as exc
+from taskflow.jobs import base
+from taskflow import logging
+from taskflow import states
+from taskflow.utils import misc
+if typing.TYPE_CHECKING:
+    from taskflow.types import entity
+
+LOG = logging.getLogger(__name__)
+
+
+class EtcdJob(base.Job):
+    """An Etcd job."""
+
+    board: 'EtcdJobBoard'
+
+    def __init__(self, board: 'EtcdJobBoard', name, client, key,
+                 uuid=None, details=None, backend=None,
+                 book=None, book_data=None,
+                 priority=base.JobPriority.NORMAL,
+                 sequence=None, created_on=None):
+        super().__init__(board, name, uuid=uuid, details=details,
+                         backend=backend, book=book, book_data=book_data)
+
+        self._client = client
+        self._key = key
+        self._priority = priority
+        self._sequence = sequence
+        self._created_on = created_on
+        self._root = board._root_path
+
+        self._lease = None
+
+    @property
+    def key(self):
+        return self._key
+
+    @property
+    def last_modified(self):
+        try:
+            raw_data = self.board.get_last_modified(self)
+            data = jsonutils.loads(raw_data)
+            ret = timeutils.parse_strtime(data["last_modified"])
+            return ret
+        except Exception:
+            LOG.exception("Cannot read load_modified key.")
+            return 0
+
+    @property
+    def created_on(self):
+        return self._created_on
+
+    @property
+    def state(self):
+        """Access the current state of this job."""
+        owner, data = self.board.get_owner_and_data(self)
+        if not data:
+            if owner is not None:
+                LOG.info(f"Owner key was found for job {self.uuid}, "
+                         f"but the key {self.key} is missing")
+            return states.COMPLETE
+        if not owner:
+            return states.UNCLAIMED
+        return states.CLAIMED
+
+    @property
+    def sequence(self):
+        return self._sequence
+
+    @property
+    def priority(self):
+        return self._priority
+
+    @property
+    def lease(self):
+        if not self._lease:
+            owner_data = self.board.get_owner_data(self)
+            if 'lease_id' not in owner_data:
+                return None
+            lease_id = owner_data['lease_id']
+            self._lease = etcd3gw.Lease(id=lease_id,
+                                        client=self._client)
+        return self._lease
+
+    def expires_in(self):
+        """How many seconds until the claim expires."""
+        if self.lease is None:
+            return -1
+        return self.lease.ttl()
+
+    def extend_expiry(self, expiry):
+        """Extends the owner key (aka the claim) expiry for this job.
+
+        Returns ``True`` if the expiry request was performed
+        otherwise ``False``.
+        """
+        if self.lease is None:
+            return False
+        ret = self.lease.refresh()
+        return (ret > 0)
+
+    @property
+    def root(self):
+        return self._root
+
+    def __lt__(self, other):
+        if not isinstance(other, EtcdJob):
+            return NotImplemented
+        if self.root == other.root:
+            if self.priority == other.priority:
+                return self.sequence < other.sequence
+            else:
+                ordered = base.JobPriority.reorder(
+                    (self.priority, self), (other.priority, other))
+                if ordered[0] is self:
+                    return False
+                return True
+        else:
+            # Different jobboards with different roots...
+            return self.root < other.root
+
+    def __eq__(self, other):
+        if not isinstance(other, EtcdJob):
+            return NotImplemented
+        return ((self.root, self.sequence, self.priority) ==
+                (other.root, other.sequence, other.priority))
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __hash__(self):
+        return hash(self.key)
+
+
+class EtcdJobBoard(base.JobBoard):
+    """A jobboard backed by `etcd`_.
+
+    This jobboard creates sequenced key/value pairs in etcd. Each key
+    represents a job and its associated value contains the parameter of the
+    job encoded in
+    json.
+    The users of the jobboard can iterate over the available job and decide if
+    they want to attempt to claim one job by calling the :meth:`.claim` method.
+    Claiming a job consists in atomically create a key based on the key of job
+    and the ".lock" postfix. If the atomic creation of the key is successful
+    the job belongs to the user. Any attempt to lock an already locked job
+    will fail.
+    When a job is complete, the user consumes the job by calling the
+    :meth:`.consume` method, it deletes the job and the lock from etcd.
+    Alternatively, a user can trash (:meth:`.trash`) or abandon
+    (:meth:`.abandon`) if they want to delete the job or leave it for another
+    user.
+    Etcd doesn't provide a method for unlocking the jobs when a consumer dies.
+    The Etcd jobboard provides timed expirations, based on a global ``ttl``
+    configuration setting or the ``expiry`` parameter of the :meth:`.claim`
+    method. When this time-to-live/expiry is reached, the job is automatically
+    unlocked and another consumer can claim it. If it is expected that a task
+    of a job takes more time than the defined time-to-live, the
+    consumer can refresh the timer by calling the :meth:`EtcdJob.extend_expiry`
+    function.
+
+    .. _etcd: https://etcd.io/
+    """
+    ROOT_PATH = "/taskflow/jobs"
+
+    TRASH_PATH = "/taskflow/.trash"
+
+    DEFAULT_PATH = "jobboard"
+
+    JOB_PREFIX = "job"
+
+    SEQUENCE_KEY = "sequence"
+
+    DATA_POSTFIX = ".data"
+
+    LOCK_POSTFIX = ".lock"
+
+    LAST_MODIFIED_POSTFIX = ".last_modified"
+
+    ETCD_CONFIG_OPTIONS = (
+        ("host", str),
+        ("port", int),
+        ("protocol", str),
+        ("ca_cert", str),
+        ("cert_key", str),
+        ("cert_cert", str),
+        ("timeout", float),
+        ("api_path", str),
+    )
+
+    INIT_STATE = 'init'
+    CONNECTED_STATE = 'connected'
+    FETCH_STATE = 'fetched'
+
+    _client: etcd3gw.Etcd3Client
+
+    def __init__(self, name, conf, client=None, persistence=None):
+        super().__init__(name, conf)
+
+        self._client = client
+        self._persistence = persistence
+        self._state = self.INIT_STATE
+
+        path_elems = [self.ROOT_PATH,
+                      self._conf.get("path", self.DEFAULT_PATH)]
+        self._root_path = self._create_path(*path_elems)
+
+        self._job_cache = {}
+        self._job_cond = threading.Condition()
+
+        self._open_close_lock = threading.RLock()
+
+        self._watcher_thd = None
+        self._thread_cancel = None
+        self._watcher = None
+        self._watcher_cancel = None
+
+    def _create_path(self, root, *args):
+        return "/".join([root] + [a.strip("/") for a in args])
+
+    def incr(self, key):
+        """Atomically increment an integer, create it if it doesn't exist"""
+        while True:
+            value = self._client.get(key)
+            if not value:
+                res = self._client.create(key, 1)
+                if res:
+                    return 1
+                # Another thread has just created the key after we failed to
+                # read it, retry to get the new current value
+                continue
+
+            value = int(value[0])
+            next_value = value + 1
+
+            res = self._client.replace(key, value, next_value)
+            if res:
+                return next_value
+
+    def get_one(self, key):
+        if self._client is None:
+            raise exc.JobFailure(f"Cannot read key {key}, client is closed")
+        value = self._client.get(key)
+        if not value:
+            return None
+        return value[0]
+
+    def _fetch_jobs(self, only_unclaimed=False, ensure_fresh=False):
+        # TODO(gthiemonge) only_unclaimed is ignored
+        if ensure_fresh or self._state != self.FETCH_STATE:
+            self._ensure_fresh()
+        return sorted(self._job_cache.values())
+
+    def _ensure_fresh(self):
+        prefix = self._create_path(self._root_path, self.JOB_PREFIX)
+        jobs = self._client.get_prefix(prefix)
+        listed_jobs = {}
+        for job in jobs:
+            data, metadata = job
+            key = misc.binary_decode(metadata['key'])
+            if key.endswith(self.DATA_POSTFIX):
+                key = key.rstrip(self.DATA_POSTFIX)
+                listed_jobs[key] = data
+
+        removed_jobs = []
+        with self._job_cond:
+            for key in self._job_cache.keys():
+                if key not in listed_jobs:
+                    removed_jobs.append(key)
+        for key in removed_jobs:
+            self._remove_job_from_cache(key)
+
+        for key, data in listed_jobs.items():
+            self._process_incoming_job(key, data)
+        self._state = self.FETCH_STATE
+
+    def _process_incoming_job(self, key, data):
+        try:
+            job_data = jsonutils.loads(data)
+        except jsonutils.json.JSONDecodeError:
+            msg = ("Incorrectly formatted job data found at "
+                   f"key: {key}")
+            LOG.warning(msg, exc_info=True)
+            LOG.info("Deleting invalid job data at key: %s", key)
+            self._client.delete(key)
+            raise exc.JobFailure(msg)
+
+        with self._job_cond:
+            if key not in self._job_cache:
+                job_priority = base.JobPriority.convert(job_data["priority"])
+                new_job = EtcdJob(self,
+                                  job_data["name"],
+                                  self._client,
+                                  key,
+                                  uuid=job_data["uuid"],
+                                  details=job_data.get("details", {}),
+                                  backend=self._persistence,
+                                  book_data=job_data.get("book"),
+                                  priority=job_priority,
+                                  sequence=job_data["sequence"])
+                self._job_cache[key] = new_job
+                self._job_cond.notify_all()
+
+    def _remove_job_from_cache(self, key):
+        """Remove job from cache."""
+        with self._job_cond:
+            if key in self._job_cache:
+                self._job_cache.pop(key, None)
+
+    def _board_removal_func(self, job):
+        try:
+            self._remove_job_from_cache(job.key)
+            self._client.delete_prefix(job.key)
+        except Exception:
+            LOG.exception(f"Failed to delete prefix {job.key}")
+
+    def iterjobs(self, only_unclaimed=False, ensure_fresh=False):
+        """Returns an iterator of jobs that are currently on this board."""
+        return base.JobBoardIterator(
+            self, LOG, only_unclaimed=only_unclaimed,
+            ensure_fresh=ensure_fresh,
+            board_fetch_func=self._fetch_jobs,
+            board_removal_func=self._board_removal_func)
+
+    def wait(self, timeout=None):
+        """Waits a given amount of time for **any** jobs to be posted."""
+        # Wait until timeout expires (or forever) for jobs to appear.
+        watch = timeutils.StopWatch(duration=timeout)
+        watch.start()
+        with self._job_cond:
+            while True:
+                if not self._job_cache:
+                    if watch.expired():
+                        raise exc.NotFound("Expired waiting for jobs to"
+                                           " arrive; waited %s seconds"
+                                           % watch.elapsed())
+                    # This is done since the given timeout can not be provided
+                    # to the condition variable, since we can not ensure that
+                    # when we acquire the condition that there will actually
+                    # be jobs (especially if we are spuriously awaken), so we
+                    # must recalculate the amount of time we really have left.
+                    self._job_cond.wait(watch.leftover(return_none=True))
+                else:
+                    curr_jobs = self._fetch_jobs()
+                    fetch_func = lambda ensure_fresh: curr_jobs
+                    removal_func = lambda a_job: self._remove_job_from_cache(
+                        a_job.key)
+                    return base.JobBoardIterator(
+                        self, LOG, board_fetch_func=fetch_func,
+                        board_removal_func=removal_func)
+
+    @property
+    def job_count(self):
+        """Returns how many jobs are on this jobboard."""
+        return len(self._job_cache)
+
+    def get_owner_data(self, job: EtcdJob) -> typing.Optional[dict]:
+        owner_key = job.key + self.LOCK_POSTFIX
+        owner_data = self.get_one(owner_key)
+        if not owner_data:
+            return None
+        return jsonutils.loads(owner_data)
+
+    def find_owner(self, job: EtcdJob) -> typing.Optional[dict]:
+        """Gets the owner of the job if one exists."""
+        data = self.get_owner_data(job)
+        if data:
+            return data['owner']
+        return None
+
+    def get_data(self, job: EtcdJob) -> bytes:
+        key = job.key + self.DATA_POSTFIX
+        return self.get_one(key)
+
+    def get_owner_and_data(self, job: EtcdJob) -> tuple[
+            typing.Optional[str], typing.Optional[bytes]]:
+        if self._client is None:
+            raise exc.JobFailure("Cannot retrieve information, "
+                                 "not connected")
+
+        job_data = None
+        job_owner = None
+
+        for data, metadata in self._client.get_prefix(job.key + "."):
+            key = misc.binary_decode(metadata["key"])
+            if key.endswith(self.DATA_POSTFIX):
+                # bytes?
+                job_data = data
+            elif key.endswith(self.LOCK_POSTFIX):
+                data = jsonutils.loads(data)
+                job_owner = data["owner"]
+
+        return job_owner, job_data
+
+    def set_last_modified(self, job: EtcdJob):
+        key = job.key + self.LAST_MODIFIED_POSTFIX
+
+        now = timeutils.utcnow()
+        self._client.put(key, jsonutils.dumps({"last_modified": now}))
+
+    def get_last_modified(self, job: EtcdJob):
+        key = job.key + self.LAST_MODIFIED_POSTFIX
+
+        return self.get_one(key)
+
+    def post(self, name, book=None, details=None,
+             priority=base.JobPriority.NORMAL) -> EtcdJob:
+        """Atomically creates and posts a job to the jobboard."""
+        job_priority = base.JobPriority.convert(priority)
+        job_uuid = uuidutils.generate_uuid()
+        job_posting = base.format_posting(job_uuid, name,
+                                          created_on=timeutils.utcnow(),
+                                          book=book, details=details,
+                                          priority=job_priority)
+        seq = self.incr(self._create_path(self._root_path, self.SEQUENCE_KEY))
+        key = self._create_path(self._root_path, f"{self.JOB_PREFIX}{seq}")
+
+        job_posting["sequence"] = seq
+        raw_job_posting = jsonutils.dumps(job_posting)
+
+        data_key = key + self.DATA_POSTFIX
+
+        self._client.create(data_key, raw_job_posting)
+        job = EtcdJob(self, name, self._client, key,
+                      uuid=job_uuid,
+                      details=details,
+                      backend=self._persistence,
+                      book=book,
+                      book_data=job_posting.get('book'),
+                      priority=job_priority,
+                      sequence=seq)
+        with self._job_cond:
+            self._job_cache[key] = job
+            self._job_cond.notify_all()
+        return job
+
+    @base.check_who
+    def claim(self, job, who, expiry=None):
+        """Atomically attempts to claim the provided job."""
+        owner_key = job.key + self.LOCK_POSTFIX
+
+        ttl = expiry or self._conf.get('ttl', None)
+
+        if ttl:
+            lease = self._client.lease(ttl=ttl)
+        else:
+            lease = None
+
+        owner_dict = {
+            "owner": who,
+        }
+        if lease:
+            owner_dict["lease_id"] = lease.id
+
+        owner_value = jsonutils.dumps(owner_dict)
+
+        # Create a lock for the job, if the lock already exists, the job
+        # is owned by another worker
+        created = self._client.create(owner_key, owner_value, lease=lease)
+        if not created:
+            # Creation is denied, revoke the lease, we cannot claim the job.
+            if lease:
+                lease.revoke()
+
+            owner = self.find_owner(job)
+            if owner:
+                message = f"Job {job.uuid} already claimed by '{owner}'"
+            else:
+                message = f"Job {job.uuid} already claimed"
+            raise exc.UnclaimableJob(message)
+
+        # Ensure that the job still exists, it may have been claimed and
+        # consumed by another thread before we enter this function
+        if not self.get_data(job):
+            # Revoke the lease
+            if lease:
+                lease.revoke()
+            else:
+                self._client.delete(owner_key)
+            raise exc.UnclaimableJob(f"Job {job.uuid} already deleted.")
+
+        self.set_last_modified(job)
+
+    @base.check_who
+    def consume(self, job, who):
+        """Permanently (and atomically) removes a job from the jobboard."""
+        owner, data = self.get_owner_and_data(job)
+        if data is None or owner is None:
+            raise exc.NotFound(f"Cannot find job {job.uuid}")
+        if owner != who:
+            raise exc.JobFailure(f"Cannot consume a job {job.uuid}"
+                                 f" which is not owned by {who}")
+
+        self._client.delete_prefix(job.key + ".")
+        self._remove_job_from_cache(job.key)
+
+    @base.check_who
+    def abandon(self, job, who):
+        """Atomically attempts to abandon the provided job."""
+        owner, data = self.get_owner_and_data(job)
+        if data is None or owner is None:
+            raise exc.NotFound(f"Cannot find job {job.uuid}")
+        if owner != who:
+            raise exc.JobFailure(f"Cannot abandon a job {job.uuid}"
+                                 f" which is not owned by {who}")
+
+        owner_key = job.key + self.LOCK_POSTFIX
+        self._client.delete(owner_key)
+
+    @base.check_who
+    def trash(self, job, who):
+        """Trash the provided job."""
+        owner, data = self.get_owner_and_data(job)
+        if data is None or owner is None:
+            raise exc.NotFound(f"Cannot find job {job.uuid}")
+        if owner != who:
+            raise exc.JobFailure(f"Cannot trash a job {job.uuid} "
+                                 f"which is not owned by {who}")
+
+        trash_key = job.key.replace(self.ROOT_PATH, self.TRASH_PATH)
+        self._client.create(trash_key, data)
+        self._client.delete_prefix(job.key + ".")
+        self._remove_job_from_cache(job.key)
+
+    def register_entity(self, entity: 'entity.Entity'):
+        """Register an entity to the jobboard('s backend), e.g: a conductor"""
+        # TODO(gthiemonge) Doesn't seem to be useful with Etcd
+
+    @property
+    def connected(self):
+        """Returns if this jobboard is connected."""
+        return self._client is not None
+
+    @fasteners.locked(lock='_open_close_lock')
+    def connect(self):
+        """Opens the connection to any backend system."""
+        if self._client is None:
+            etcd_conf = {}
+            for config_opts in self.ETCD_CONFIG_OPTIONS:
+                key, value_type = config_opts
+                if key in self._conf:
+                    etcd_conf[key] = value_type(self._conf[key])
+
+            self._client = etcd3gw.Etcd3Client(**etcd_conf)
+        self._state = self.CONNECTED_STATE
+
+        watch_url = self._create_path(self._root_path, self.JOB_PREFIX)
+        self._thread_cancel = threading.Event()
+        try:
+            (self._watcher,
+             self._watcher_cancel) = self._client.watch_prefix(watch_url)
+        except etcd3gw.exceptions.ConnectionFailedError:
+            exc.raise_with_cause(exc.JobFailure,
+                                 "Failed to connect to Etcd")
+        self._watcher_thd = threading.Thread(target=self._watcher_thread)
+        self._watcher_thd.start()
+
+    def _watcher_thread(self):
+        while not self._thread_cancel.is_set():
+            for event in self._watcher:
+                if "kv" not in event:
+                    continue
+
+                key_value = event["kv"]
+                key = misc.binary_decode(key_value["key"])
+
+                if key.endswith(self.DATA_POSTFIX):
+                    key = key.rstrip(self.DATA_POSTFIX)
+                    if event.get("type") == "DELETE":
+                        self._remove_job_from_cache(key)
+                    else:
+                        data = key_value["value"]
+                        self._process_incoming_job(key, data)
+
+    @fasteners.locked(lock='_open_close_lock')
+    def close(self):
+        """Close the connection to any backend system."""
+        if self._client is not None:
+            if self._watcher_cancel is not None:
+                self._watcher_cancel()
+            if self._thread_cancel is not None:
+                self._thread_cancel.set()
+            if self._watcher_thd is not None:
+                self._watcher_thd.join()
+            del self._client
+            self._client = None
+            self._state = self.INIT_STATE
```
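For context, the methods added in impl_etcd.py combine into the usual post/claim/consume jobboard lifecycle described in the class docstring above. The snippet below is a rough usage sketch built only from the methods visible in this diff; the board name, connection settings (`host`, `port`, `ttl`) and the `worker-1` identity are illustrative values, not defaults shipped by the package.

```python
# Rough usage sketch of the new etcd jobboard (illustrative values only).
from taskflow import exceptions as exc
from taskflow.jobs.backends import impl_etcd

conf = {"host": "127.0.0.1", "port": 2379, "ttl": 30}
board = impl_etcd.EtcdJobBoard("my-board", conf)
board.connect()
try:
    # Producer side: post a job; its details are stored as JSON in etcd.
    board.post("example-job", details={"work": "something to do"})

    # Consumer side: iterate, try to claim, do the work, then consume.
    for job in board.iterjobs(ensure_fresh=True, only_unclaimed=True):
        try:
            board.claim(job, "worker-1")
        except exc.UnclaimableJob:
            continue  # another worker locked this job first
        # ... perform the work described by job.details ...
        board.consume(job, "worker-1")
finally:
    board.close()
```

If a task may outlive the claim's time-to-live, the holder can call `job.extend_expiry(...)` to refresh the underlying etcd lease rather than losing the claim to another consumer.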