durabletask 0.1.0a5__py3-none-any.whl → 0.1.1a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of durabletask might be problematic.
- durabletask/client.py +4 -2
- durabletask/internal/orchestrator_service_pb2.py +176 -129
- durabletask/internal/orchestrator_service_pb2.pyi +826 -0
- durabletask/internal/orchestrator_service_pb2_grpc.py +545 -377
- durabletask/task.py +157 -36
- durabletask/worker.py +141 -64
- {durabletask-0.1.0a5.dist-info → durabletask-0.1.1a1.dist-info}/METADATA +2 -2
- durabletask-0.1.1a1.dist-info/RECORD +16 -0
- {durabletask-0.1.0a5.dist-info → durabletask-0.1.1a1.dist-info}/WHEEL +1 -1
- durabletask-0.1.0a5.dist-info/RECORD +0 -15
- {durabletask-0.1.0a5.dist-info → durabletask-0.1.1a1.dist-info}/LICENSE +0 -0
- {durabletask-0.1.0a5.dist-info → durabletask-0.1.1a1.dist-info}/top_level.txt +0 -0
durabletask/task.py
CHANGED
@@ -4,9 +4,11 @@
 # See https://peps.python.org/pep-0563/
 from __future__ import annotations
 
+import math
 from abc import ABC, abstractmethod
 from datetime import datetime, timedelta
-from typing import Any, Callable, Generator, Generic, List, TypeVar, Union
+from typing import (Any, Callable, Generator, Generic, List, Optional, TypeVar,
+                    Union)
 
 import durabletask.internal.helpers as pbh
 import durabletask.internal.orchestrator_service_pb2 as pb
@@ -87,17 +89,18 @@ class OrchestrationContext(ABC):
 
     @abstractmethod
     def call_activity(self, activity: Union[Activity[TInput, TOutput], str], *,
-                      input:
+                      input: Optional[TInput] = None,
+                      retry_policy: Optional[RetryPolicy] = None) -> Task[TOutput]:
         """Schedule an activity for execution.
 
         Parameters
         ----------
         activity: Union[Activity[TInput, TOutput], str]
             A reference to the activity function to call.
-        input:
+        input: Optional[TInput]
             The JSON-serializable input (or None) to pass to the activity.
-
-            The
+        retry_policy: Optional[RetryPolicy]
+            The retry policy to use for this activity call.
 
         Returns
         -------
@@ -108,19 +111,22 @@ class OrchestrationContext(ABC):
 
     @abstractmethod
     def call_sub_orchestrator(self, orchestrator: Orchestrator[TInput, TOutput], *,
-                              input:
-                              instance_id:
+                              input: Optional[TInput] = None,
+                              instance_id: Optional[str] = None,
+                              retry_policy: Optional[RetryPolicy] = None) -> Task[TOutput]:
         """Schedule sub-orchestrator function for execution.
 
         Parameters
         ----------
         orchestrator: Orchestrator[TInput, TOutput]
             A reference to the orchestrator function to call.
-        input:
+        input: Optional[TInput]
             The optional JSON-serializable input to pass to the orchestrator function.
-        instance_id:
+        instance_id: Optional[str]
             A unique ID to use for the sub-orchestration instance. If not specified, a
             random UUID will be used.
+        retry_policy: Optional[RetryPolicy]
+            The retry policy to use for this sub-orchestrator call.
 
         Returns
         -------
@@ -162,7 +168,7 @@
 
 
 class FailureDetails:
-    def __init__(self, message: str, error_type: str, stack_trace:
+    def __init__(self, message: str, error_type: str, stack_trace: Optional[str]):
         self._message = message
         self._error_type = error_type
         self._stack_trace = stack_trace
@@ -176,7 +182,7 @@ class FailureDetails:
         return self._error_type
 
     @property
-    def stack_trace(self) ->
+    def stack_trace(self) -> Optional[str]:
         return self._stack_trace
 
 
@@ -206,8 +212,8 @@ class OrchestrationStateError(Exception):
 class Task(ABC, Generic[T]):
     """Abstract base class for asynchronous tasks in a durable orchestration."""
     _result: T
-    _exception:
-    _parent:
+    _exception: Optional[TaskFailedError]
+    _parent: Optional[CompositeTask[T]]
 
     def __init__(self) -> None:
         super().__init__()
@@ -261,29 +267,6 @@ class CompositeTask(Task[T]):
     def on_child_completed(self, task: Task[T]):
         pass
 
-
-class CompletableTask(Task[T]):
-
-    def __init__(self):
-        super().__init__()
-
-    def complete(self, result: T):
-        if self._is_complete:
-            raise ValueError('The task has already completed.')
-        self._result = result
-        self._is_complete = True
-        if self._parent is not None:
-            self._parent.on_child_completed(self)
-
-    def fail(self, message: str, details: pb.TaskFailureDetails):
-        if self._is_complete:
-            raise ValueError('The task has already completed.')
-        self._exception = TaskFailedError(message, details)
-        self._is_complete = True
-        if self._parent is not None:
-            self._parent.on_child_completed(self)
-
-
 class WhenAllTask(CompositeTask[List[T]]):
     """A task that completes when all of its child tasks complete."""
 
@@ -313,6 +296,76 @@ class WhenAllTask(CompositeTask[List[T]]):
         return self._completed_tasks
 
 
+class CompletableTask(Task[T]):
+
+    def __init__(self):
+        super().__init__()
+        self._retryable_parent = None
+
+    def complete(self, result: T):
+        if self._is_complete:
+            raise ValueError('The task has already completed.')
+        self._result = result
+        self._is_complete = True
+        if self._parent is not None:
+            self._parent.on_child_completed(self)
+
+    def fail(self, message: str, details: pb.TaskFailureDetails):
+        if self._is_complete:
+            raise ValueError('The task has already completed.')
+        self._exception = TaskFailedError(message, details)
+        self._is_complete = True
+        if self._parent is not None:
+            self._parent.on_child_completed(self)
+
+
+class RetryableTask(CompletableTask[T]):
+    """A task that can be retried according to a retry policy."""
+
+    def __init__(self, retry_policy: RetryPolicy, action: pb.OrchestratorAction,
+                 start_time: datetime, is_sub_orch: bool) -> None:
+        super().__init__()
+        self._action = action
+        self._retry_policy = retry_policy
+        self._attempt_count = 1
+        self._start_time = start_time
+        self._is_sub_orch = is_sub_orch
+
+    def increment_attempt_count(self) -> None:
+        self._attempt_count += 1
+
+    def compute_next_delay(self) -> Union[timedelta, None]:
+        if self._attempt_count >= self._retry_policy.max_number_of_attempts:
+            return None
+
+        retry_expiration: datetime = datetime.max
+        if self._retry_policy.retry_timeout is not None and self._retry_policy.retry_timeout != datetime.max:
+            retry_expiration = self._start_time + self._retry_policy.retry_timeout
+
+        if self._retry_policy.backoff_coefficient is None:
+            backoff_coefficient = 1.0
+        else:
+            backoff_coefficient = self._retry_policy.backoff_coefficient
+
+        if datetime.utcnow() < retry_expiration:
+            next_delay_f = math.pow(backoff_coefficient, self._attempt_count - 1) * self._retry_policy.first_retry_interval.total_seconds()
+
+            if self._retry_policy.max_retry_interval is not None:
+                next_delay_f = min(next_delay_f, self._retry_policy.max_retry_interval.total_seconds())
+            return timedelta(seconds=next_delay_f)
+
+        return None
+
+
+class TimerTask(CompletableTask[T]):
+
+    def __init__(self) -> None:
+        super().__init__()
+
+    def set_retryable_parent(self, retryable_task: RetryableTask):
+        self._retryable_parent = retryable_task
+
+
 class WhenAnyTask(CompositeTask[Task]):
     """A task that completes when any of its child tasks complete."""
 
@@ -376,6 +429,74 @@ Orchestrator = Callable[[OrchestrationContext, TInput], Union[Generator[Task, An
 Activity = Callable[[ActivityContext, TInput], TOutput]
 
 
+class RetryPolicy:
+    """Represents the retry policy for an orchestration or activity function."""
+
+    def __init__(self, *,
+                 first_retry_interval: timedelta,
+                 max_number_of_attempts: int,
+                 backoff_coefficient: Optional[float] = 1.0,
+                 max_retry_interval: Optional[timedelta] = None,
+                 retry_timeout: Optional[timedelta] = None):
+        """Creates a new RetryPolicy instance.
+
+        Parameters
+        ----------
+        first_retry_interval : timedelta
+            The retry interval to use for the first retry attempt.
+        max_number_of_attempts : int
+            The maximum number of retry attempts.
+        backoff_coefficient : Optional[float]
+            The backoff coefficient to use for calculating the next retry interval.
+        max_retry_interval : Optional[timedelta]
+            The maximum retry interval to use for any retry attempt.
+        retry_timeout : Optional[timedelta]
+            The maximum amount of time to spend retrying the operation.
+        """
+        # validate inputs
+        if first_retry_interval < timedelta(seconds=0):
+            raise ValueError('first_retry_interval must be >= 0')
+        if max_number_of_attempts < 1:
+            raise ValueError('max_number_of_attempts must be >= 1')
+        if backoff_coefficient is not None and backoff_coefficient < 1:
+            raise ValueError('backoff_coefficient must be >= 1')
+        if max_retry_interval is not None and max_retry_interval < timedelta(seconds=0):
+            raise ValueError('max_retry_interval must be >= 0')
+        if retry_timeout is not None and retry_timeout < timedelta(seconds=0):
+            raise ValueError('retry_timeout must be >= 0')
+
+        self._first_retry_interval = first_retry_interval
+        self._max_number_of_attempts = max_number_of_attempts
+        self._backoff_coefficient = backoff_coefficient
+        self._max_retry_interval = max_retry_interval
+        self._retry_timeout = retry_timeout
+
+    @property
+    def first_retry_interval(self) -> timedelta:
+        """The retry interval to use for the first retry attempt."""
+        return self._first_retry_interval
+
+    @property
+    def max_number_of_attempts(self) -> int:
+        """The maximum number of retry attempts."""
+        return self._max_number_of_attempts
+
+    @property
+    def backoff_coefficient(self) -> Optional[float]:
+        """The backoff coefficient to use for calculating the next retry interval."""
+        return self._backoff_coefficient
+
+    @property
+    def max_retry_interval(self) -> Optional[timedelta]:
+        """The maximum retry interval to use for any retry attempt."""
+        return self._max_retry_interval
+
+    @property
+    def retry_timeout(self) -> Optional[timedelta]:
+        """The maximum amount of time to spend retrying the operation."""
+        return self._retry_timeout
+
+
 def get_name(fn: Callable) -> str:
     """Returns the name of the provided function"""
     name = fn.__name__
durabletask/worker.py
CHANGED
@@ -6,7 +6,8 @@ import logging
 from datetime import datetime, timedelta
 from threading import Event, Thread
 from types import GeneratorType
-from typing import Any, Dict, Generator, List, Sequence, Tuple, TypeVar, Union
+from typing import (Any, Dict, Generator, List, Optional, Sequence, Tuple,
+                    TypeVar, Union)
 
 import grpc
 from google.protobuf import empty_pb2
@@ -47,7 +48,7 @@ class _Registry:
 
         self.orchestrators[name] = fn
 
-    def get_orchestrator(self, name: str) ->
+    def get_orchestrator(self, name: str) -> Optional[task.Orchestrator]:
         return self.orchestrators.get(name)
 
     def add_activity(self, fn: task.Activity) -> str:
@@ -66,7 +67,7 @@ class _Registry:
 
         self.activities[name] = fn
 
-    def get_activity(self, name: str) ->
+    def get_activity(self, name: str) -> Optional[task.Activity]:
         return self.activities.get(name)
 
 
@@ -81,20 +82,19 @@ class ActivityNotRegisteredError(ValueError):
 
 
 class TaskHubGrpcWorker:
-    _response_stream:
+    _response_stream: Optional[grpc.Future] = None
 
     def __init__(self, *,
-                 host_address:
-                 metadata:
-                 log_handler
-                 log_formatter:
+                 host_address: Optional[str] = None,
+                 metadata: Optional[List[Tuple[str, str]]] = None,
+                 log_handler=None,
+                 log_formatter: Optional[logging.Formatter] = None,
                  secure_channel: bool = False):
         self._registry = _Registry()
         self._host_address = host_address if host_address else shared.get_default_host_address()
         self._metadata = metadata
         self._logger = shared.get_logger("worker", log_handler, log_formatter)
         self._shutdown = Event()
-        self._response_stream = None
         self._is_running = False
         self._secure_channel = secure_channel
 
@@ -224,8 +224,8 @@ class TaskHubGrpcWorker:
 
 
 class _RuntimeOrchestrationContext(task.OrchestrationContext):
-    _generator:
-    _previous_task:
+    _generator: Optional[Generator[task.Task, Any, Any]]
+    _previous_task: Optional[task.Task]
 
     def __init__(self, instance_id: str):
         self._generator = None
@@ -237,10 +237,10 @@ class _RuntimeOrchestrationContext(task.OrchestrationContext):
         self._sequence_number = 0
         self._current_utc_datetime = datetime(1000, 1, 1)
         self._instance_id = instance_id
-        self._completion_status:
+        self._completion_status: Optional[pb.OrchestrationStatus] = None
         self._received_events: Dict[str, List[Any]] = {}
         self._pending_events: Dict[str, List[task.CompletableTask]] = {}
-        self._new_input:
+        self._new_input: Optional[Any] = None
         self._save_events = False
 
     def run(self, generator: Generator[task.Task, Any, Any]):
@@ -259,22 +259,20 @@ class _RuntimeOrchestrationContext(task.OrchestrationContext):
             # has reached a completed state. The only time this won't be the
             # case is if the user yielded on a WhenAll task and there are still
             # outstanding child tasks that need to be completed.
-
+            while self._previous_task is not None and self._previous_task.is_complete:
+                next_task = None
                 if self._previous_task.is_failed:
-                # Raise the failure as an exception to the generator.
-                # handle the exception or allow it to fail the orchestration.
-                self._generator.throw(self._previous_task.get_exception())
-
-
-
-
-
-
-
-            # If a completed task was returned, then we can keep running the generator function.
-            if not self._previous_task.is_complete:
-                break
+                    # Raise the failure as an exception to the generator.
+                    # The orchestrator can then either handle the exception or allow it to fail the orchestration.
+                    next_task = self._generator.throw(self._previous_task.get_exception())
+                else:
+                    # Resume the generator with the previous result.
+                    # This will either return a Task or raise StopIteration if it's done.
+                    next_task = self._generator.send(self._previous_task.get_result())
+
+                if not isinstance(next_task, task.Task):
+                    raise TypeError("The orchestrator generator yielded a non-Task object")
+                self._previous_task = next_task
 
     def set_complete(self, result: Any, status: pb.OrchestrationStatus, is_result_encoded: bool = False):
         if self._is_complete:
@@ -285,7 +283,7 @@ class _RuntimeOrchestrationContext(task.OrchestrationContext):
         self._pending_actions.clear()  # Cancel any pending actions
 
         self._result = result
-        result_json:
+        result_json: Optional[str] = None
         if result is not None:
             result_json = result if is_result_encoded else shared.to_json(result)
         action = ph.new_complete_orchestration_action(
@@ -318,7 +316,7 @@ class _RuntimeOrchestrationContext(task.OrchestrationContext):
     def get_actions(self) -> List[pb.OrchestratorAction]:
         if self._completion_status == pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW:
             # When continuing-as-new, we only return a single completion action.
-            carryover_events:
+            carryover_events: Optional[List[pb.HistoryEvent]] = None
             if self._save_events:
                 carryover_events = []
                 # We need to save the current set of pending events so that they can be
@@ -358,43 +356,77 @@ class _RuntimeOrchestrationContext(task.OrchestrationContext):
         self._current_utc_datetime = value
 
     def create_timer(self, fire_at: Union[datetime, timedelta]) -> task.Task:
+        return self.create_timer_internal(fire_at)
+
+    def create_timer_internal(self, fire_at: Union[datetime, timedelta],
+                              retryable_task: Optional[task.RetryableTask] = None) -> task.Task:
         id = self.next_sequence_number()
         if isinstance(fire_at, timedelta):
             fire_at = self.current_utc_datetime + fire_at
         action = ph.new_create_timer_action(id, fire_at)
         self._pending_actions[id] = action
 
-        timer_task = task.
+        timer_task = task.TimerTask()
+        if retryable_task is not None:
+            timer_task.set_retryable_parent(retryable_task)
         self._pending_tasks[id] = timer_task
         return timer_task
 
     def call_activity(self, activity: Union[task.Activity[TInput, TOutput], str], *,
-                      input:
+                      input: Optional[TInput] = None,
+                      retry_policy: Optional[task.RetryPolicy] = None) -> task.Task[TOutput]:
         id = self.next_sequence_number()
-        name = activity if isinstance(activity, str) else task.get_name(activity)
-        encoded_input = shared.to_json(input) if input is not None else None
-        action = ph.new_schedule_task_action(id, name, encoded_input)
-        self._pending_actions[id] = action
 
-
-
-        return
+        self.call_activity_function_helper(id, activity, input=input, retry_policy=retry_policy,
+                                           is_sub_orch=False)
+        return self._pending_tasks.get(id, task.CompletableTask())
 
     def call_sub_orchestrator(self, orchestrator: task.Orchestrator[TInput, TOutput], *,
-                              input:
-                              instance_id:
+                              input: Optional[TInput] = None,
+                              instance_id: Optional[str] = None,
+                              retry_policy: Optional[task.RetryPolicy] = None) -> task.Task[TOutput]:
         id = self.next_sequence_number()
-
-
-
-
-
-
+        orchestrator_name = task.get_name(orchestrator)
+        self.call_activity_function_helper(id, orchestrator_name, input=input, retry_policy=retry_policy,
+                                           is_sub_orch=True, instance_id=instance_id)
+        return self._pending_tasks.get(id, task.CompletableTask())
+
+    def call_activity_function_helper(self, id: Optional[int],
+                                      activity_function: Union[task.Activity[TInput, TOutput], str], *,
+                                      input: Optional[TInput] = None,
+                                      retry_policy: Optional[task.RetryPolicy] = None,
+                                      is_sub_orch: bool = False,
+                                      instance_id: Optional[str] = None,
+                                      fn_task: Optional[task.CompletableTask[TOutput]] = None):
+        if id is None:
+            id = self.next_sequence_number()
+
+        if fn_task is None:
+            encoded_input = shared.to_json(input) if input is not None else None
+        else:
+            # Here, we don't need to convert the input to JSON because it is already converted.
+            # We just need to take string representation of it.
+            encoded_input = str(input)
+        if not is_sub_orch:
+            name = activity_function if isinstance(activity_function, str) else task.get_name(activity_function)
+            action = ph.new_schedule_task_action(id, name, encoded_input)
+        else:
+            if instance_id is None:
+                # Create a deteministic instance ID based on the parent instance ID
                 instance_id = f"{self.instance_id}:{id:04x}"
             if not isinstance(activity_function, str):
                 raise ValueError("Orchestrator function name must be a string")
             action = ph.new_create_sub_orchestration_action(id, activity_function, instance_id, encoded_input)
         self._pending_actions[id] = action
 
-
-
-
+        if fn_task is None:
+            if retry_policy is None:
+                fn_task = task.CompletableTask[TOutput]()
+            else:
+                fn_task = task.RetryableTask[TOutput](retry_policy=retry_policy, action=action,
+                                                      start_time=self.current_utc_datetime,
+                                                      is_sub_orch=is_sub_orch)
+        self._pending_tasks[id] = fn_task
 
     def wait_for_external_event(self, name: str) -> task.Task:
         # Check to see if this event has already been received, in which case we
@@ -426,12 +458,11 @@ class _RuntimeOrchestrationContext(task.OrchestrationContext):
 
 
 class _OrchestrationExecutor:
-    _generator:
+    _generator: Optional[task.Orchestrator] = None
 
     def __init__(self, registry: _Registry, logger: logging.Logger):
         self._registry = registry
         self._logger = logger
-        self._generator = None
         self._is_suspended = False
         self._suspended_events: List[pb.HistoryEvent] = []
 
@@ -462,7 +493,7 @@ class _OrchestrationExecutor:
         if not ctx._is_complete:
             task_count = len(ctx._pending_tasks)
             event_count = len(ctx._pending_events)
-            self._logger.info(f"{instance_id}:
+            self._logger.info(f"{instance_id}: Orchestrator yielded with {task_count} task(s) and {event_count} event(s) outstanding.")
         elif ctx._completion_status and ctx._completion_status is not pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW:
             completion_status_str = pbh.get_orchestration_status_str(ctx._completion_status)
             self._logger.info(f"{instance_id}: Orchestration completed with status: {completion_status_str}")
@@ -521,12 +552,29 @@ class _OrchestrationExecutor:
                    f"{ctx.instance_id}: Ignoring unexpected timerFired event with ID = {timer_id}.")
                return
            timer_task.complete(None)
-
+            if timer_task._retryable_parent is not None:
+                activity_action = timer_task._retryable_parent._action
+
+                if not timer_task._retryable_parent._is_sub_orch:
+                    cur_task = activity_action.scheduleTask
+                    instance_id = None
+                else:
+                    cur_task = activity_action.createSubOrchestration
+                    instance_id = cur_task.instanceId
+                ctx.call_activity_function_helper(id=activity_action.id, activity_function=cur_task.name,
+                                                  input=cur_task.input.value,
+                                                  retry_policy=timer_task._retryable_parent._retry_policy,
+                                                  is_sub_orch=timer_task._retryable_parent._is_sub_orch,
+                                                  instance_id=instance_id,
+                                                  fn_task=timer_task._retryable_parent)
+            else:
+                ctx.resume()
        elif event.HasField("taskScheduled"):
            # This history event confirms that the activity execution was successfully scheduled.
            # Remove the taskScheduled event from the pending action list so we don't schedule it again.
            task_id = event.eventId
            action = ctx._pending_actions.pop(task_id, None)
+            activity_task = ctx._pending_tasks.get(task_id, None)
            if not action:
                raise _get_non_determinism_error(task_id, task.get_name(ctx.call_activity))
            elif not action.HasField("scheduleTask"):
@@ -562,10 +610,25 @@ class _OrchestrationExecutor:
                self._logger.warning(
                    f"{ctx.instance_id}: Ignoring unexpected taskFailed event with ID = {task_id}.")
                return
-
-
-
-
+
+            if isinstance(activity_task, task.RetryableTask):
+                if activity_task._retry_policy is not None:
+                    next_delay = activity_task.compute_next_delay()
+                    if next_delay is None:
+                        activity_task.fail(
+                            f"{ctx.instance_id}: Activity task #{task_id} failed: {event.taskFailed.failureDetails.errorMessage}",
+                            event.taskFailed.failureDetails)
+                        ctx.resume()
+                    else:
+                        activity_task.increment_attempt_count()
+                        ctx.create_timer_internal(next_delay, activity_task)
+            elif isinstance(activity_task, task.CompletableTask):
+                activity_task.fail(
+                    f"{ctx.instance_id}: Activity task #{task_id} failed: {event.taskFailed.failureDetails.errorMessage}",
+                    event.taskFailed.failureDetails)
+                ctx.resume()
+            else:
+                raise TypeError("Unexpected task type")
        elif event.HasField("subOrchestrationInstanceCreated"):
            # This history event confirms that the sub-orchestration execution was successfully scheduled.
            # Remove the subOrchestrationInstanceCreated event from the pending action list so we don't schedule it again.
@@ -606,17 +669,31 @@ class _OrchestrationExecutor:
                self._logger.warning(
                    f"{ctx.instance_id}: Ignoring unexpected subOrchestrationInstanceFailed event with ID = {task_id}.")
                return
-            sub_orch_task.
-
-
-
+            if isinstance(sub_orch_task, task.RetryableTask):
+                if sub_orch_task._retry_policy is not None:
+                    next_delay = sub_orch_task.compute_next_delay()
+                    if next_delay is None:
+                        sub_orch_task.fail(
+                            f"Sub-orchestration task #{task_id} failed: {failedEvent.failureDetails.errorMessage}",
+                            failedEvent.failureDetails)
+                        ctx.resume()
+                    else:
+                        sub_orch_task.increment_attempt_count()
+                        ctx.create_timer_internal(next_delay, sub_orch_task)
+            elif isinstance(sub_orch_task, task.CompletableTask):
+                sub_orch_task.fail(
+                    f"Sub-orchestration task #{task_id} failed: {failedEvent.failureDetails.errorMessage}",
+                    failedEvent.failureDetails)
+                ctx.resume()
+            else:
+                raise TypeError("Unexpected sub-orchestration task type")
        elif event.HasField("eventRaised"):
            # event names are case-insensitive
            event_name = event.eventRaised.name.casefold()
            if not ctx.is_replaying:
                self._logger.info(f"{ctx.instance_id} Event raised: {event_name}")
            task_list = ctx._pending_events.get(event_name, None)
-            decoded_result:
+            decoded_result: Optional[Any] = None
            if task_list:
                event_task = task_list.pop(0)
                if not ph.is_empty(event.eventRaised.input):
@@ -665,7 +742,7 @@ class _ActivityExecutor:
         self._registry = registry
         self._logger = logger
 
-    def execute(self, orchestration_id: str, name: str, task_id: int, encoded_input:
+    def execute(self, orchestration_id: str, name: str, task_id: int, encoded_input: Optional[str]) -> Optional[str]:
         """Executes an activity function and returns the serialized result, if any."""
         self._logger.debug(f"{orchestration_id}/{task_id}: Executing activity '{name}'...")
         fn = self._registry.get_activity(name)
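
For reference, the retry delay that RetryableTask.compute_next_delay produces in the diff above is first_retry_interval scaled by backoff_coefficient raised to (attempt_count - 1), optionally capped at max_retry_interval and cut off once retry_timeout has elapsed. Below is a standalone restatement of that arithmetic, not the library code itself.

```python
import math
from datetime import timedelta
from typing import Optional


def next_retry_delay(attempt_count: int,
                     first_retry_interval: timedelta,
                     backoff_coefficient: Optional[float] = 1.0,
                     max_retry_interval: Optional[timedelta] = None) -> timedelta:
    # Re-statement of the backoff math in compute_next_delay:
    # delay = first_retry_interval * backoff_coefficient ** (attempt_count - 1),
    # optionally capped at max_retry_interval.
    coefficient = 1.0 if backoff_coefficient is None else backoff_coefficient
    delay_s = math.pow(coefficient, attempt_count - 1) * first_retry_interval.total_seconds()
    if max_retry_interval is not None:
        delay_s = min(delay_s, max_retry_interval.total_seconds())
    return timedelta(seconds=delay_s)


# With first_retry_interval=1s, backoff_coefficient=2.0, and max_retry_interval=10s,
# attempts 1..5 wait 1s, 2s, 4s, 8s, and 10s (capped) before the next try.
for attempt in range(1, 6):
    print(attempt, next_retry_delay(attempt, timedelta(seconds=1), 2.0, timedelta(seconds=10)))
```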
{durabletask-0.1.0a5.dist-info → durabletask-0.1.1a1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: durabletask
-Version: 0.1.0a5
+Version: 0.1.1a1
 Summary: A Durable Task Client SDK for Python
 License: MIT License
 
@@ -207,7 +207,7 @@ git submodule update --init
 Once the submodule is available, the corresponding source code can be regenerated using the following command from the project root:
 
 ```sh
-make proto
+make gen-proto
 ```
 
 ### Running unit tests
durabletask-0.1.1a1.dist-info/RECORD
ADDED
@@ -0,0 +1,16 @@
+durabletask/__init__.py,sha256=4gNZ89cYaJTCvWQ1wWu3-BwRD98fWWt9k7hgHSETXU4,139
+durabletask/client.py,sha256=UmQv4fYjHmJRju6teJ-7HM0tVjd_Ugty044Dg2s19tI,8872
+durabletask/task.py,sha256=zwnlLLNCeFJmFT7wvpK0MEx-tMxu1_JCYq_OhXYW0S0,17471
+durabletask/worker.py,sha256=FoqWcgP35lzqbbOuQ1pdJ3jMNtvtd7HO-s22OOcRHyA,42926
+durabletask/internal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+durabletask/internal/grpc_interceptor.py,sha256=bixPtGQ5KPN2CWAzrfR8z9drwEdM_ZaTZ-tn97DC3LU,2878
+durabletask/internal/helpers.py,sha256=m9lnmQcx5zx3s0DC_Lddugr__O_dgWgHAAAByOmvL_c,7340
+durabletask/internal/orchestrator_service_pb2.py,sha256=N4R75_gT7P05fUsPgLpL130wjws49-SI0w6_BaleWOI,29754
+durabletask/internal/orchestrator_service_pb2.pyi,sha256=HRtK9PTpLudEoFPD08-oWom26quwWMkjpkmjIiWC-bQ,46324
+durabletask/internal/orchestrator_service_pb2_grpc.py,sha256=r0UbaHS2EacF3SdiY5QoWEZgp21XPcSje0lFEG23HcI,37544
+durabletask/internal/shared.py,sha256=1IU_sTwrntyJqMUB63yOtjQd0IqKARiTR_0L-AiM-N8,3645
+durabletask-0.1.1a1.dist-info/LICENSE,sha256=ws_MuBL-SCEBqPBFl9_FqZkaaydIJmxHrJG2parhU4M,1141
+durabletask-0.1.1a1.dist-info/METADATA,sha256=t9YiZ6gJHW3I36X99Zx6iWRuNhDPYrHoT-VxVx1FxqQ,12930
+durabletask-0.1.1a1.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+durabletask-0.1.1a1.dist-info/top_level.txt,sha256=EBVyuKWnjOwq8bJI1Uvb9U3c4fzQxACWj9p83he6fik,12
+durabletask-0.1.1a1.dist-info/RECORD,,