durabletask 0.1.0a5__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- durabletask/client.py +50 -22
- durabletask/internal/grpc_interceptor.py +7 -8
- durabletask/internal/helpers.py +16 -16
- durabletask/internal/orchestrator_service_pb2.py +194 -136
- durabletask/internal/orchestrator_service_pb2.pyi +890 -0
- durabletask/internal/orchestrator_service_pb2_grpc.py +547 -380
- durabletask/internal/shared.py +37 -9
- durabletask/task.py +174 -42
- durabletask/worker.py +198 -89
- {durabletask-0.1.0a5.dist-info → durabletask-0.2.0.dist-info}/METADATA +11 -18
- durabletask-0.2.0.dist-info/RECORD +14 -0
- {durabletask-0.1.0a5.dist-info → durabletask-0.2.0.dist-info}/WHEEL +1 -1
- durabletask/internal/__init__.py +0 -0
- durabletask-0.1.0a5.dist-info/LICENSE +0 -21
- durabletask-0.1.0a5.dist-info/RECORD +0 -15
- {durabletask-0.1.0a5.dist-info → durabletask-0.2.0.dist-info}/top_level.txt +0 -0
durabletask/worker.py
CHANGED
@@ -6,7 +6,7 @@ import logging
 from datetime import datetime, timedelta
 from threading import Event, Thread
 from types import GeneratorType
-from typing import Any, …
+from typing import Any, Generator, Optional, Sequence, TypeVar, Union
 
 import grpc
 from google.protobuf import empty_pb2
@@ -17,6 +17,7 @@ import durabletask.internal.orchestrator_service_pb2 as pb
 import durabletask.internal.orchestrator_service_pb2_grpc as stubs
 import durabletask.internal.shared as shared
 from durabletask import task
+from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl
 
 TInput = TypeVar('TInput')
 TOutput = TypeVar('TOutput')
@@ -24,8 +25,8 @@ TOutput = TypeVar('TOutput')
 
 class _Registry:
 
-    orchestrators: …
-    activities: …
+    orchestrators: dict[str, task.Orchestrator]
+    activities: dict[str, task.Activity]
 
     def __init__(self):
        self.orchestrators = {}
@@ -47,7 +48,7 @@ class _Registry:
 
        self.orchestrators[name] = fn
 
-    def get_orchestrator(self, name: str) -> …
+    def get_orchestrator(self, name: str) -> Optional[task.Orchestrator]:
        return self.orchestrators.get(name)
 
    def add_activity(self, fn: task.Activity) -> str:
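The registry holds the name-to-function lookup half; registration happens through the worker's `add_orchestrator`/`add_activity` methods, which delegate to `_Registry`. A minimal registration sketch (the `say_hello` and `greeting` functions here are hypothetical user code):

```python
from durabletask import task
from durabletask.worker import TaskHubGrpcWorker

def say_hello(ctx: task.ActivityContext, name: str) -> str:
    # Activities are plain functions; the registry stores them keyed by name.
    return f"Hello, {name}!"

def greeting(ctx: task.OrchestrationContext, name: str):
    # Orchestrators are generators that yield durable tasks.
    result = yield ctx.call_activity(say_hello, input=name)
    return result

worker = TaskHubGrpcWorker()
worker.add_orchestrator(greeting)  # lands in _Registry.orchestrators
worker.add_activity(say_hello)     # lands in _Registry.activities
worker.start()                     # begins streaming work items on a background thread
```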
@@ -66,7 +67,7 @@ class _Registry:
 
        self.activities[name] = fn
 
-    def get_activity(self, name: str) -> …
+    def get_activity(self, name: str) -> Optional[task.Activity]:
        return self.activities.get(name)
 
 
@@ -81,23 +82,33 @@ class ActivityNotRegisteredError(ValueError):
 
 
 class TaskHubGrpcWorker:
-    _response_stream: …
+    _response_stream: Optional[grpc.Future] = None
+    _interceptors: Optional[list[shared.ClientInterceptor]] = None
 
     def __init__(self, *,
-                 host_address: …
-                 metadata: …
-                 log_handler…
-                 log_formatter: …
-                 secure_channel: bool = False
+                 host_address: Optional[str] = None,
+                 metadata: Optional[list[tuple[str, str]]] = None,
+                 log_handler=None,
+                 log_formatter: Optional[logging.Formatter] = None,
+                 secure_channel: bool = False,
+                 interceptors: Optional[Sequence[shared.ClientInterceptor]] = None):
        self._registry = _Registry()
        self._host_address = host_address if host_address else shared.get_default_host_address()
-        self._metadata = metadata
        self._logger = shared.get_logger("worker", log_handler, log_formatter)
        self._shutdown = Event()
-        self._response_stream = None
        self._is_running = False
        self._secure_channel = secure_channel
 
+        # Determine the interceptors to use
+        if interceptors is not None:
+            self._interceptors = list(interceptors)
+            if metadata:
+                self._interceptors.append(DefaultClientInterceptorImpl(metadata))
+        elif metadata:
+            self._interceptors = [DefaultClientInterceptorImpl(metadata)]
+        else:
+            self._interceptors = None
+
    def __enter__(self):
        return self
 
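The constructor now folds gRPC metadata and caller-supplied interceptors into a single `_interceptors` list, replacing the old `_metadata` field. A construction sketch (the bearer token and the commented-out custom interceptor are hypothetical):

```python
from durabletask.worker import TaskHubGrpcWorker

# Metadata alone: the worker wraps it in a DefaultClientInterceptorImpl.
worker = TaskHubGrpcWorker(
    host_address="localhost:4001",  # defaults to shared.get_default_host_address()
    metadata=[("authorization", "Bearer <token>")],
    secure_channel=True)

# Metadata plus custom interceptors: both end up in self._interceptors,
# with the metadata interceptor appended after the caller's, per the branch above.
# worker = TaskHubGrpcWorker(metadata=[("authorization", "Bearer <token>")],
#                            interceptors=[my_tracing_interceptor])
```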
@@ -118,7 +129,7 @@ class TaskHubGrpcWorker:
 
    def start(self):
        """Starts the worker on a background thread and begins listening for work items."""
-        channel = shared.get_grpc_channel(self._host_address, self.…
+        channel = shared.get_grpc_channel(self._host_address, self._secure_channel, self._interceptors)
        stub = stubs.TaskHubSidecarServiceStub(channel)
 
        if self._is_running:
@@ -140,13 +151,15 @@ class TaskHubGrpcWorker:
 
            # The stream blocks until either a work item is received or the stream is canceled
            # by another thread (see the stop() method).
-            for work_item in self._response_stream:
+            for work_item in self._response_stream:  # type: ignore
                request_type = work_item.WhichOneof('request')
                self._logger.debug(f'Received "{request_type}" work item')
                if work_item.HasField('orchestratorRequest'):
-                    executor.submit(self._execute_orchestrator, work_item.orchestratorRequest, stub)
+                    executor.submit(self._execute_orchestrator, work_item.orchestratorRequest, stub, work_item.completionToken)
                elif work_item.HasField('activityRequest'):
-                    executor.submit(self._execute_activity, work_item.activityRequest, stub)
+                    executor.submit(self._execute_activity, work_item.activityRequest, stub, work_item.completionToken)
+                elif work_item.HasField('healthPing'):
+                    pass  # no-op
                else:
                    self._logger.warning(f'Unexpected work item type: {request_type}')
 
@@ -185,23 +198,27 @@ class TaskHubGrpcWorker:
        self._logger.info("Worker shutdown completed")
        self._is_running = False
 
-    def _execute_orchestrator(self, req: pb.OrchestratorRequest, stub: stubs.TaskHubSidecarServiceStub):
+    def _execute_orchestrator(self, req: pb.OrchestratorRequest, stub: stubs.TaskHubSidecarServiceStub, completionToken):
        try:
            executor = _OrchestrationExecutor(self._registry, self._logger)
-
-            res = pb.OrchestratorResponse(…
+            result = executor.execute(req.instanceId, req.pastEvents, req.newEvents)
+            res = pb.OrchestratorResponse(
+                instanceId=req.instanceId,
+                actions=result.actions,
+                customStatus=pbh.get_string_value(result.encoded_custom_status),
+                completionToken=completionToken)
        except Exception as ex:
            self._logger.exception(f"An error occurred while trying to execute instance '{req.instanceId}': {ex}")
            failure_details = pbh.new_failure_details(ex)
            actions = [pbh.new_complete_orchestration_action(-1, pb.ORCHESTRATION_STATUS_FAILED, "", failure_details)]
-            res = pb.OrchestratorResponse(instanceId=req.instanceId, actions=actions)
+            res = pb.OrchestratorResponse(instanceId=req.instanceId, actions=actions, completionToken=completionToken)
 
        try:
            stub.CompleteOrchestratorTask(res)
        except Exception as ex:
            self._logger.exception(f"Failed to deliver orchestrator response for '{req.instanceId}' to sidecar: {ex}")
 
-    def _execute_activity(self, req: pb.ActivityRequest, stub: stubs.TaskHubSidecarServiceStub):
+    def _execute_activity(self, req: pb.ActivityRequest, stub: stubs.TaskHubSidecarServiceStub, completionToken):
        instance_id = req.orchestrationInstance.instanceId
        try:
            executor = _ActivityExecutor(self._registry, self._logger)
@@ -209,12 +226,14 @@ class TaskHubGrpcWorker:
            res = pb.ActivityResponse(
                instanceId=instance_id,
                taskId=req.taskId,
-                result=pbh.get_string_value(result)
+                result=pbh.get_string_value(result),
+                completionToken=completionToken)
        except Exception as ex:
            res = pb.ActivityResponse(
                instanceId=instance_id,
                taskId=req.taskId,
-                failureDetails=pbh.new_failure_details(ex)
+                failureDetails=pbh.new_failure_details(ex),
+                completionToken=completionToken)
 
        try:
            stub.CompleteActivityTask(res)
@@ -224,24 +243,25 @@ class TaskHubGrpcWorker:
 
 
 class _RuntimeOrchestrationContext(task.OrchestrationContext):
-    _generator: …
-    _previous_task: …
+    _generator: Optional[Generator[task.Task, Any, Any]]
+    _previous_task: Optional[task.Task]
 
    def __init__(self, instance_id: str):
        self._generator = None
        self._is_replaying = True
        self._is_complete = False
        self._result = None
-        self._pending_actions: …
-        self._pending_tasks: …
+        self._pending_actions: dict[int, pb.OrchestratorAction] = {}
+        self._pending_tasks: dict[int, task.CompletableTask] = {}
        self._sequence_number = 0
        self._current_utc_datetime = datetime(1000, 1, 1)
        self._instance_id = instance_id
-        self._completion_status: …
-        self._received_events: …
-        self._pending_events: …
-        self._new_input: …
+        self._completion_status: Optional[pb.OrchestrationStatus] = None
+        self._received_events: dict[str, list[Any]] = {}
+        self._pending_events: dict[str, list[task.CompletableTask]] = {}
+        self._new_input: Optional[Any] = None
        self._save_events = False
+        self._encoded_custom_status: Optional[str] = None
 
    def run(self, generator: Generator[task.Task, Any, Any]):
        self._generator = generator
@@ -259,22 +279,20 @@ class _RuntimeOrchestrationContext(task.OrchestrationContext):
            # has reached a completed state. The only time this won't be the
            # case is if the user yielded on a WhenAll task and there are still
            # outstanding child tasks that need to be completed.
-            …
+            while self._previous_task is not None and self._previous_task.is_complete:
+                next_task = None
            if self._previous_task.is_failed:
-                # Raise the failure as an exception to the generator. …
-                # handle the exception or allow it to fail the orchestration.
-                self._generator.throw(self._previous_task.get_exception())
-
-
-
-
-
-
-
-
-            # If a completed task was returned, then we can keep running the generator function.
-            if not self._previous_task.is_complete:
-                break
+                # Raise the failure as an exception to the generator.
+                # The orchestrator can then either handle the exception or allow it to fail the orchestration.
+                next_task = self._generator.throw(self._previous_task.get_exception())
+            else:
+                # Resume the generator with the previous result.
+                # This will either return a Task or raise StopIteration if it's done.
+                next_task = self._generator.send(self._previous_task.get_result())
+
+            if not isinstance(next_task, task.Task):
+                raise TypeError("The orchestrator generator yielded a non-Task object")
+            self._previous_task = next_task
 
    def set_complete(self, result: Any, status: pb.OrchestrationStatus, is_result_encoded: bool = False):
        if self._is_complete:
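The rewritten loop drives the orchestrator generator with `send()` for successful results and `throw()` for failures, stopping when the generator yields a not-yet-complete task. The same driving pattern in miniature (a standalone sketch, not the library's actual types):

```python
def orchestrator():
    try:
        result = yield "pending-task"  # stands in for yielding a durable Task
    except RuntimeError as ex:
        result = f"recovered: {ex}"
    return result

gen = orchestrator()
gen.send(None)                              # prime; generator yields its first "task"
try:
    gen.throw(RuntimeError("task failed"))  # deliver a failure, like throw() above
except StopIteration as stop:
    print(stop.value)                       # "recovered: task failed"
```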
@@ -285,7 +303,7 @@ class _RuntimeOrchestrationContext(task.OrchestrationContext):
        self._pending_actions.clear()  # Cancel any pending actions
 
        self._result = result
-        result_json: …
+        result_json: Optional[str] = None
        if result is not None:
            result_json = result if is_result_encoded else shared.to_json(result)
        action = ph.new_complete_orchestration_action(
@@ -315,10 +333,10 @@ class _RuntimeOrchestrationContext(task.OrchestrationContext):
        self._new_input = new_input
        self._save_events = save_events
 
-    def get_actions(self) -> …
+    def get_actions(self) -> list[pb.OrchestratorAction]:
        if self._completion_status == pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW:
            # When continuing-as-new, we only return a single completion action.
-            carryover_events: …
+            carryover_events: Optional[list[pb.HistoryEvent]] = None
            if self._save_events:
                carryover_events = []
                # We need to save the current set of pending events so that they can be
@@ -357,44 +375,81 @@ class _RuntimeOrchestrationContext(task.OrchestrationContext):
    def current_utc_datetime(self, value: datetime):
        self._current_utc_datetime = value
 
+    def set_custom_status(self, custom_status: Any) -> None:
+        self._encoded_custom_status = shared.to_json(custom_status) if custom_status is not None else None
+
    def create_timer(self, fire_at: Union[datetime, timedelta]) -> task.Task:
+        return self.create_timer_internal(fire_at)
+
+    def create_timer_internal(self, fire_at: Union[datetime, timedelta],
+                              retryable_task: Optional[task.RetryableTask] = None) -> task.Task:
        id = self.next_sequence_number()
        if isinstance(fire_at, timedelta):
            fire_at = self.current_utc_datetime + fire_at
        action = ph.new_create_timer_action(id, fire_at)
        self._pending_actions[id] = action
 
-        timer_task = task.…
+        timer_task = task.TimerTask()
+        if retryable_task is not None:
+            timer_task.set_retryable_parent(retryable_task)
        self._pending_tasks[id] = timer_task
        return timer_task
 
    def call_activity(self, activity: Union[task.Activity[TInput, TOutput], str], *,
-                      input: …
+                      input: Optional[TInput] = None,
+                      retry_policy: Optional[task.RetryPolicy] = None) -> task.Task[TOutput]:
        id = self.next_sequence_number()
-        name = activity if isinstance(activity, str) else task.get_name(activity)
-        encoded_input = shared.to_json(input) if input is not None else None
-        action = ph.new_schedule_task_action(id, name, encoded_input)
-        self._pending_actions[id] = action
 
-
-
-        return …
+        self.call_activity_function_helper(id, activity, input=input, retry_policy=retry_policy,
+                                           is_sub_orch=False)
+        return self._pending_tasks.get(id, task.CompletableTask())
 
    def call_sub_orchestrator(self, orchestrator: task.Orchestrator[TInput, TOutput], *,
-                              input: …
-                              instance_id: …
+                              input: Optional[TInput] = None,
+                              instance_id: Optional[str] = None,
+                              retry_policy: Optional[task.RetryPolicy] = None) -> task.Task[TOutput]:
        id = self.next_sequence_number()
-
-
-
-
-
-
+        orchestrator_name = task.get_name(orchestrator)
+        self.call_activity_function_helper(id, orchestrator_name, input=input, retry_policy=retry_policy,
+                                           is_sub_orch=True, instance_id=instance_id)
+        return self._pending_tasks.get(id, task.CompletableTask())
+
+    def call_activity_function_helper(self, id: Optional[int],
+                                      activity_function: Union[task.Activity[TInput, TOutput], str], *,
+                                      input: Optional[TInput] = None,
+                                      retry_policy: Optional[task.RetryPolicy] = None,
+                                      is_sub_orch: bool = False,
+                                      instance_id: Optional[str] = None,
+                                      fn_task: Optional[task.CompletableTask[TOutput]] = None):
+        if id is None:
+            id = self.next_sequence_number()
+
+        if fn_task is None:
+            encoded_input = shared.to_json(input) if input is not None else None
+        else:
+            # Here, we don't need to convert the input to JSON because it is already converted.
+            # We just need to take the string representation of it.
+            encoded_input = str(input)
+        if not is_sub_orch:
+            name = activity_function if isinstance(activity_function, str) else task.get_name(activity_function)
+            action = ph.new_schedule_task_action(id, name, encoded_input)
+        else:
+            if instance_id is None:
+                # Create a deterministic instance ID based on the parent instance ID
+                instance_id = f"{self.instance_id}:{id:04x}"
+            if not isinstance(activity_function, str):
+                raise ValueError("Orchestrator function name must be a string")
+            action = ph.new_create_sub_orchestration_action(id, activity_function, instance_id, encoded_input)
        self._pending_actions[id] = action
 
-
-
+        if fn_task is None:
+            if retry_policy is None:
+                fn_task = task.CompletableTask[TOutput]()
+            else:
+                fn_task = task.RetryableTask[TOutput](retry_policy=retry_policy, action=action,
+                                                      start_time=self.current_utc_datetime,
+                                                      is_sub_orch=is_sub_orch)
+        self._pending_tasks[id] = fn_task
 
    def wait_for_external_event(self, name: str) -> task.Task:
        # Check to see if this event has already been received, in which case we
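Taken together, `set_custom_status` and the new `retry_policy` parameters surface in orchestrator code roughly like this (a sketch: the `RetryPolicy` argument names follow the `durabletask/task.py` API referenced by this diff but should be treated as an assumption, and the `charge_card` activity is hypothetical):

```python
from datetime import timedelta
from durabletask import task

def process_order(ctx: task.OrchestrationContext, order_id: str):
    ctx.set_custom_status({"stage": "charging"})  # serialized via shared.to_json

    retry = task.RetryPolicy(
        first_retry_interval=timedelta(seconds=1),
        max_number_of_attempts=3,
        backoff_coefficient=2.0,
        max_retry_interval=timedelta(seconds=30))

    # On failure, the worker schedules an internal retry timer instead of
    # failing the orchestration (see the taskFailed handling below).
    result = yield ctx.call_activity("charge_card", input=order_id, retry_policy=retry)

    ctx.set_custom_status({"stage": "done"})
    return result
```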
@@ -425,17 +480,25 @@ class _RuntimeOrchestrationContext(task.OrchestrationContext):
        self.set_continued_as_new(new_input, save_events)
 
 
+class ExecutionResults:
+    actions: list[pb.OrchestratorAction]
+    encoded_custom_status: Optional[str]
+
+    def __init__(self, actions: list[pb.OrchestratorAction], encoded_custom_status: Optional[str]):
+        self.actions = actions
+        self.encoded_custom_status = encoded_custom_status
+
+
 class _OrchestrationExecutor:
-    _generator: …
+    _generator: Optional[task.Orchestrator] = None
 
    def __init__(self, registry: _Registry, logger: logging.Logger):
        self._registry = registry
        self._logger = logger
-        self._generator = None
        self._is_suspended = False
-        self._suspended_events: …
+        self._suspended_events: list[pb.HistoryEvent] = []
 
-    def execute(self, instance_id: str, old_events: Sequence[pb.HistoryEvent], new_events: Sequence[pb.HistoryEvent]) -> …
+    def execute(self, instance_id: str, old_events: Sequence[pb.HistoryEvent], new_events: Sequence[pb.HistoryEvent]) -> ExecutionResults:
        if not new_events:
            raise task.OrchestrationStateError("The new history event list must have at least one event in it.")
 
@@ -462,7 +525,7 @@ class _OrchestrationExecutor:
        if not ctx._is_complete:
            task_count = len(ctx._pending_tasks)
            event_count = len(ctx._pending_events)
-            self._logger.info(f"{instance_id}: …
+            self._logger.info(f"{instance_id}: Orchestrator yielded with {task_count} task(s) and {event_count} event(s) outstanding.")
        elif ctx._completion_status and ctx._completion_status is not pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW:
            completion_status_str = pbh.get_orchestration_status_str(ctx._completion_status)
            self._logger.info(f"{instance_id}: Orchestration completed with status: {completion_status_str}")
@@ -470,7 +533,7 @@ class _OrchestrationExecutor:
        actions = ctx.get_actions()
        if self._logger.level <= logging.DEBUG:
            self._logger.debug(f"{instance_id}: Returning {len(actions)} action(s): {_get_action_summary(actions)}")
-        return actions
+        return ExecutionResults(actions=actions, encoded_custom_status=ctx._encoded_custom_status)
 
    def process_event(self, ctx: _RuntimeOrchestrationContext, event: pb.HistoryEvent) -> None:
        if self._is_suspended and _is_suspendable(event):
@@ -521,12 +584,29 @@ class _OrchestrationExecutor:
                    f"{ctx.instance_id}: Ignoring unexpected timerFired event with ID = {timer_id}.")
                return
            timer_task.complete(None)
-
+            if timer_task._retryable_parent is not None:
+                activity_action = timer_task._retryable_parent._action
+
+                if not timer_task._retryable_parent._is_sub_orch:
+                    cur_task = activity_action.scheduleTask
+                    instance_id = None
+                else:
+                    cur_task = activity_action.createSubOrchestration
+                    instance_id = cur_task.instanceId
+                ctx.call_activity_function_helper(id=activity_action.id, activity_function=cur_task.name,
+                                                  input=cur_task.input.value,
+                                                  retry_policy=timer_task._retryable_parent._retry_policy,
+                                                  is_sub_orch=timer_task._retryable_parent._is_sub_orch,
+                                                  instance_id=instance_id,
+                                                  fn_task=timer_task._retryable_parent)
+            else:
+                ctx.resume()
        elif event.HasField("taskScheduled"):
            # This history event confirms that the activity execution was successfully scheduled.
            # Remove the taskScheduled event from the pending action list so we don't schedule it again.
            task_id = event.eventId
            action = ctx._pending_actions.pop(task_id, None)
+            activity_task = ctx._pending_tasks.get(task_id, None)
            if not action:
                raise _get_non_determinism_error(task_id, task.get_name(ctx.call_activity))
            elif not action.HasField("scheduleTask"):
@@ -562,10 +642,25 @@ class _OrchestrationExecutor:
                self._logger.warning(
                    f"{ctx.instance_id}: Ignoring unexpected taskFailed event with ID = {task_id}.")
                return
-
-
-
-
+
+            if isinstance(activity_task, task.RetryableTask):
+                if activity_task._retry_policy is not None:
+                    next_delay = activity_task.compute_next_delay()
+                    if next_delay is None:
+                        activity_task.fail(
+                            f"{ctx.instance_id}: Activity task #{task_id} failed: {event.taskFailed.failureDetails.errorMessage}",
+                            event.taskFailed.failureDetails)
+                        ctx.resume()
+                    else:
+                        activity_task.increment_attempt_count()
+                        ctx.create_timer_internal(next_delay, activity_task)
+            elif isinstance(activity_task, task.CompletableTask):
+                activity_task.fail(
+                    f"{ctx.instance_id}: Activity task #{task_id} failed: {event.taskFailed.failureDetails.errorMessage}",
+                    event.taskFailed.failureDetails)
+                ctx.resume()
+            else:
+                raise TypeError("Unexpected task type")
        elif event.HasField("subOrchestrationInstanceCreated"):
            # This history event confirms that the sub-orchestration execution was successfully scheduled.
            # Remove the subOrchestrationInstanceCreated event from the pending action list so we don't schedule it again.
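`compute_next_delay` itself lives in `durabletask/task.py` and is not part of this diff; under the usual exponential-backoff reading of `RetryPolicy`, it plausibly behaves like the sketch below (an assumption for illustration, not the shipped implementation):

```python
from datetime import timedelta
from typing import Optional

def compute_next_delay(attempt: int,
                       first_retry_interval: timedelta,
                       backoff_coefficient: float,
                       max_number_of_attempts: int,
                       max_retry_interval: Optional[timedelta] = None) -> Optional[timedelta]:
    # Hypothetical helper mirroring the retry flow in the handlers above.
    if attempt >= max_number_of_attempts:
        return None  # retries exhausted -> the handlers above fail the task
    delay = first_retry_interval * (backoff_coefficient ** (attempt - 1))
    if max_retry_interval is not None and delay > max_retry_interval:
        delay = max_retry_interval
    return delay  # the handlers above schedule an internal timer for this delay
```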
@@ -606,17 +701,31 @@ class _OrchestrationExecutor:
                self._logger.warning(
                    f"{ctx.instance_id}: Ignoring unexpected subOrchestrationInstanceFailed event with ID = {task_id}.")
                return
-            sub_orch_task.…
-
-
+            if isinstance(sub_orch_task, task.RetryableTask):
+                if sub_orch_task._retry_policy is not None:
+                    next_delay = sub_orch_task.compute_next_delay()
+                    if next_delay is None:
+                        sub_orch_task.fail(
+                            f"Sub-orchestration task #{task_id} failed: {failedEvent.failureDetails.errorMessage}",
+                            failedEvent.failureDetails)
+                        ctx.resume()
+                    else:
+                        sub_orch_task.increment_attempt_count()
+                        ctx.create_timer_internal(next_delay, sub_orch_task)
+            elif isinstance(sub_orch_task, task.CompletableTask):
+                sub_orch_task.fail(
+                    f"Sub-orchestration task #{task_id} failed: {failedEvent.failureDetails.errorMessage}",
+                    failedEvent.failureDetails)
+                ctx.resume()
+            else:
+                raise TypeError("Unexpected sub-orchestration task type")
        elif event.HasField("eventRaised"):
            # event names are case-insensitive
            event_name = event.eventRaised.name.casefold()
            if not ctx.is_replaying:
                self._logger.info(f"{ctx.instance_id} Event raised: {event_name}")
            task_list = ctx._pending_events.get(event_name, None)
-            decoded_result: …
+            decoded_result: Optional[Any] = None
            if task_list:
                event_task = task_list.pop(0)
                if not ph.is_empty(event.eventRaised.input):
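The `eventRaised` handling pairs with `wait_for_external_event` on the orchestrator side and `raise_orchestration_event` on the client side (client.py is also updated in this release). A usage sketch, assuming the `TaskHubGrpcClient` API:

```python
from durabletask import client, task

def approval_flow(ctx: task.OrchestrationContext, _):
    # Blocks until an "approval" event arrives; names are matched
    # case-insensitively (note the casefold() above).
    approved = yield ctx.wait_for_external_event("approval")
    return "approved" if approved else "rejected"

c = client.TaskHubGrpcClient()
instance_id = c.schedule_new_orchestration(approval_flow)
c.raise_orchestration_event(instance_id, "Approval", data=True)
```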
@@ -665,7 +774,7 @@ class _ActivityExecutor:
        self._registry = registry
        self._logger = logger
 
-    def execute(self, orchestration_id: str, name: str, task_id: int, encoded_input: …
+    def execute(self, orchestration_id: str, name: str, task_id: int, encoded_input: Optional[str]) -> Optional[str]:
        """Executes an activity function and returns the serialized result, if any."""
        self._logger.debug(f"{orchestration_id}/{task_id}: Executing activity '{name}'...")
        fn = self._registry.get_activity(name)
@@ -740,7 +849,7 @@ def _get_new_event_summary(new_events: Sequence[pb.HistoryEvent]) -> str:
    elif len(new_events) == 1:
        return f"[{new_events[0].WhichOneof('eventType')}]"
    else:
-        counts: …
+        counts: dict[str, int] = {}
        for event in new_events:
            event_type = event.WhichOneof('eventType')
            counts[event_type] = counts.get(event_type, 0) + 1
@@ -754,7 +863,7 @@ def _get_action_summary(new_actions: Sequence[pb.OrchestratorAction]) -> str:
    elif len(new_actions) == 1:
        return f"[{new_actions[0].WhichOneof('orchestratorActionType')}]"
    else:
-        counts: …
+        counts: dict[str, int] = {}
        for action in new_actions:
            action_type = action.WhichOneof('orchestratorActionType')
            counts[action_type] = counts.get(action_type, 0) + 1
{durabletask-0.1.0a5.dist-info → durabletask-0.2.0.dist-info}/METADATA
CHANGED
@@ -1,8 +1,8 @@
-Metadata-Version: 2.…
+Metadata-Version: 2.2
 Name: durabletask
-Version: 0.1.0a5
+Version: 0.2.0
 Summary: A Durable Task Client SDK for Python
-License: …
+License: MIT License
 
 Copyright (c) Microsoft Corporation.
 
@@ -30,23 +30,21 @@ Keywords: durable,task,workflow
 Classifier: Development Status :: 3 - Alpha
 Classifier: Programming Language :: Python :: 3
 Classifier: License :: OSI Approved :: MIT License
-Requires-Python: >=3.…
+Requires-Python: >=3.9
 Description-Content-Type: text/markdown
-License-File: LICENSE
 Requires-Dist: grpcio
 
-# Durable Task …
+# Durable Task SDK for Python
 
 [](https://opensource.org/licenses/MIT)
 [](https://github.com/microsoft/durabletask-python/actions/workflows/pr-validation.yml)
 [](https://badge.fury.io/py/durabletask)
 
-This repo contains a Python …
+This repo contains a Python SDK for use with the [Azure Durable Task Scheduler](https://techcommunity.microsoft.com/blog/appsonazureblog/announcing-limited-early-access-of-the-durable-task-scheduler-for-azure-durable-/4286526) and the [Durable Task Framework for Go](https://github.com/microsoft/durabletask-go). With this SDK, you can define, schedule, and manage durable orchestrations using ordinary Python code.
 
 ⚠️ **This SDK is currently under active development and is not yet ready for production use.** ⚠️
 
-> Note that this …
-
+> Note that this SDK is **not** currently compatible with [Azure Durable Functions](https://docs.microsoft.com/azure/azure-functions/durable/durable-functions-overview). If you are looking for a Python SDK for Azure Durable Functions, please see [this repo](https://github.com/Azure/azure-functions-durable-python).
 
 ## Supported patterns
 
@@ -171,7 +169,7 @@ Orchestrations can specify retry policies for activities and sub-orchestrations.
 
 ### Prerequisites
 
-- Python 3.…
+- Python 3.9
 - A Durable Task-compatible sidecar, like [Dapr Workflow](https://docs.dapr.io/developing-applications/building-blocks/workflow/workflow-overview/)
 
 ### Installing the Durable Task Python client SDK
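For reference, the SDK installs from PyPI (the package name matches the PyPI badge above):

```sh
pip install durabletask
```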
@@ -198,17 +196,12 @@ The following is more information about how to develop this project. Note that d…
 
 ### Generating protobufs
 
-Protobuf definitions are stored in the [./submodules/durabletask-proto](./submodules/durabletask-proto) directory, which is a submodule. To update the submodule, run the following command from the project root:
-
 ```sh
-…
+pip3 install -r dev-requirements.txt
+make gen-proto
 ```
 
-…
-
-```sh
-make proto-gen
-```
+This will download the `orchestrator_service.proto` from the `microsoft/durabletask-protobuf` repo and compile it using `grpcio-tools`. The version of the source proto file that was downloaded can be found in the file `durabletask/internal/PROTO_SOURCE_COMMIT_HASH`.
 
 ### Running unit tests
 
durabletask-0.2.0.dist-info/RECORD
ADDED
@@ -0,0 +1,14 @@
+durabletask/__init__.py,sha256=4gNZ89cYaJTCvWQ1wWu3-BwRD98fWWt9k7hgHSETXU4,139
+durabletask/client.py,sha256=vAm7BtVHeeWFVoiwvOGcrhrkand43oBCCVNnzbNfH6I,10011
+durabletask/task.py,sha256=Brxt-cFqFaIjU07UFLCQoRv7ioycOLcJQbRh9Je_UW4,17722
+durabletask/worker.py,sha256=YPW0XIp0z7UPh9afFaygcS3uR4jgD4FcI2KCXPp0_6M,44583
+durabletask/internal/grpc_interceptor.py,sha256=KGl8GGIbNdiEnWVLwQwkOemWvIlcEO0dh-_Tg20h5XA,2834
+durabletask/internal/helpers.py,sha256=G4nEhLnRUE1VbFHkOMX277_6LSsMH9lTh9sXUD0GdHM,7289
+durabletask/internal/orchestrator_service_pb2.py,sha256=nkADgSglhimtNjAuISJdBz1bwA8xYm1cEQdL9ZifsmU,33993
+durabletask/internal/orchestrator_service_pb2.pyi,sha256=99AIPzz4AdXrkQrN2MHkHkW9zKqmH4puSwvg9ze5IjA,50517
+durabletask/internal/orchestrator_service_pb2_grpc.py,sha256=mZXK0QtvaRr6cjm8gi9y-DjMNR2Xg2Adu79WsR22pQc,41146
+durabletask/internal/shared.py,sha256=dKRGU8z1EQM4_YA6zkKeKfiaWbiZ6-B8lP-wHy7Q_jI,4379
+durabletask-0.2.0.dist-info/METADATA,sha256=khGxeTGfnNAlL4Ei0YqdI52tl0Z42zHkxjUfVxDALOM,12867
+durabletask-0.2.0.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
+durabletask-0.2.0.dist-info/top_level.txt,sha256=EBVyuKWnjOwq8bJI1Uvb9U3c4fzQxACWj9p83he6fik,12
+durabletask-0.2.0.dist-info/RECORD,,
durabletask/internal/__init__.py
DELETED
File without changes
durabletask-0.1.0a5.dist-info/LICENSE
DELETED
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) Microsoft Corporation.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.