durabletask 0.1.0a1__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
- durabletask/__init__.py +3 -0
- durabletask/client.py +85 -24
- durabletask/entities/__init__.py +13 -0
- durabletask/entities/durable_entity.py +93 -0
- durabletask/entities/entity_context.py +154 -0
- durabletask/entities/entity_instance_id.py +40 -0
- durabletask/entities/entity_lock.py +17 -0
- durabletask/internal/entity_state_shim.py +66 -0
- durabletask/internal/exceptions.py +11 -0
- durabletask/internal/grpc_interceptor.py +65 -0
- durabletask/internal/helpers.py +85 -22
- durabletask/internal/orchestration_entity_context.py +115 -0
- durabletask/internal/orchestrator_service_pb2.py +252 -137
- durabletask/internal/orchestrator_service_pb2.pyi +1144 -0
- durabletask/internal/orchestrator_service_pb2_grpc.py +985 -396
- durabletask/internal/shared.py +46 -8
- durabletask/task.py +276 -43
- durabletask/worker.py +1762 -227
- durabletask-1.0.0.dist-info/METADATA +64 -0
- durabletask-1.0.0.dist-info/RECORD +23 -0
- {durabletask-0.1.0a1.dist-info → durabletask-1.0.0.dist-info}/WHEEL +1 -1
- durabletask/internal/__init__.py +0 -0
- durabletask-0.1.0a1.dist-info/METADATA +0 -257
- durabletask-0.1.0a1.dist-info/RECORD +0 -14
- {durabletask-0.1.0a1.dist-info → durabletask-1.0.0.dist-info/licenses}/LICENSE +0 -0
- {durabletask-0.1.0a1.dist-info → durabletask-1.0.0.dist-info}/top_level.txt +0 -0
durabletask/internal/helpers.py
CHANGED
@@ -3,30 +3,33 @@
 
 import traceback
 from datetime import datetime
-from typing import
+from typing import Optional
 
 from google.protobuf import timestamp_pb2, wrappers_pb2
 
+from durabletask.entities import EntityInstanceId
 import durabletask.internal.orchestrator_service_pb2 as pb
 
 # TODO: The new_xxx_event methods are only used by test code and should be moved elsewhere
 
 
-def new_orchestrator_started_event(timestamp:
+def new_orchestrator_started_event(timestamp: Optional[datetime] = None) -> pb.HistoryEvent:
     ts = timestamp_pb2.Timestamp()
     if timestamp is not None:
         ts.FromDatetime(timestamp)
     return pb.HistoryEvent(eventId=-1, timestamp=ts, orchestratorStarted=pb.OrchestratorStartedEvent())
 
 
-def new_execution_started_event(name: str, instance_id: str, encoded_input:
+def new_execution_started_event(name: str, instance_id: str, encoded_input: Optional[str] = None,
+                                tags: Optional[dict[str, str]] = None) -> pb.HistoryEvent:
     return pb.HistoryEvent(
         eventId=-1,
         timestamp=timestamp_pb2.Timestamp(),
         executionStarted=pb.ExecutionStartedEvent(
             name=name,
             input=get_string_value(encoded_input),
-            orchestrationInstance=pb.OrchestrationInstance(instanceId=instance_id)
+            orchestrationInstance=pb.OrchestrationInstance(instanceId=instance_id),
+            tags=tags))
 
 
 def new_timer_created_event(timer_id: int, fire_at: datetime) -> pb.HistoryEvent:
@@ -49,7 +52,7 @@ def new_timer_fired_event(timer_id: int, fire_at: datetime) -> pb.HistoryEvent:
     )
 
 
-def new_task_scheduled_event(event_id: int, name: str, encoded_input:
+def new_task_scheduled_event(event_id: int, name: str, encoded_input: Optional[str] = None) -> pb.HistoryEvent:
     return pb.HistoryEvent(
         eventId=event_id,
         timestamp=timestamp_pb2.Timestamp(),
@@ -57,7 +60,7 @@ def new_task_scheduled_event(event_id: int, name: str, encoded_input: Union[str,
     )
 
 
-def new_task_completed_event(event_id: int, encoded_output:
+def new_task_completed_event(event_id: int, encoded_output: Optional[str] = None) -> pb.HistoryEvent:
     return pb.HistoryEvent(
         eventId=-1,
         timestamp=timestamp_pb2.Timestamp(),
@@ -77,7 +80,7 @@ def new_sub_orchestration_created_event(
         event_id: int,
         name: str,
         instance_id: str,
-        encoded_input:
+        encoded_input: Optional[str] = None) -> pb.HistoryEvent:
     return pb.HistoryEvent(
         eventId=event_id,
         timestamp=timestamp_pb2.Timestamp(),
@@ -88,7 +91,7 @@ def new_sub_orchestration_created_event(
     )
 
 
-def new_sub_orchestration_completed_event(event_id: int, encoded_output:
+def new_sub_orchestration_completed_event(event_id: int, encoded_output: Optional[str] = None) -> pb.HistoryEvent:
     return pb.HistoryEvent(
         eventId=-1,
         timestamp=timestamp_pb2.Timestamp(),
@@ -116,7 +119,7 @@ def new_failure_details(ex: Exception) -> pb.TaskFailureDetails:
     )
 
 
-def new_event_raised_event(name: str, encoded_input:
+def new_event_raised_event(name: str, encoded_input: Optional[str] = None) -> pb.HistoryEvent:
     return pb.HistoryEvent(
         eventId=-1,
         timestamp=timestamp_pb2.Timestamp(),
@@ -140,7 +143,7 @@ def new_resume_event() -> pb.HistoryEvent:
     )
 
 
-def new_terminated_event(*, encoded_output:
+def new_terminated_event(*, encoded_output: Optional[str] = None) -> pb.HistoryEvent:
     return pb.HistoryEvent(
         eventId=-1,
         timestamp=timestamp_pb2.Timestamp(),
@@ -150,25 +153,30 @@ def new_terminated_event(*, encoded_output: Union[str, None] = None) -> pb.Histo
     )
 
 
-def get_string_value(val:
+def get_string_value(val: Optional[str]) -> Optional[wrappers_pb2.StringValue]:
     if val is None:
         return None
     else:
         return wrappers_pb2.StringValue(value=val)
 
 
+def get_string_value_or_empty(val: Optional[str]) -> wrappers_pb2.StringValue:
+    if val is None:
+        return wrappers_pb2.StringValue(value="")
+    return wrappers_pb2.StringValue(value=val)
+
+
 def new_complete_orchestration_action(
         id: int,
         status: pb.OrchestrationStatus,
-        result:
-        failure_details:
-
+        result: Optional[str] = None,
+        failure_details: Optional[pb.TaskFailureDetails] = None,
+        carryover_events: Optional[list[pb.HistoryEvent]] = None) -> pb.OrchestratorAction:
     completeOrchestrationAction = pb.CompleteOrchestrationAction(
         orchestrationStatus=status,
         result=get_string_value(result),
-        failureDetails=failure_details
-
-    # TODO: CarryoverEvents
+        failureDetails=failure_details,
+        carryoverEvents=carryover_events)
 
     return pb.OrchestratorAction(id=id, completeOrchestration=completeOrchestrationAction)
 
@@ -179,13 +187,66 @@ def new_create_timer_action(id: int, fire_at: datetime) -> pb.OrchestratorAction
     return pb.OrchestratorAction(id=id, createTimer=pb.CreateTimerAction(fireAt=timestamp))
 
 
-def new_schedule_task_action(id: int, name: str, encoded_input:
+def new_schedule_task_action(id: int, name: str, encoded_input: Optional[str],
+                             tags: Optional[dict[str, str]]) -> pb.OrchestratorAction:
     return pb.OrchestratorAction(id=id, scheduleTask=pb.ScheduleTaskAction(
         name=name,
-        input=get_string_value(encoded_input)
+        input=get_string_value(encoded_input),
+        tags=tags
     ))
 
 
+def new_call_entity_action(id: int, parent_instance_id: str, entity_id: EntityInstanceId, operation: str, encoded_input: Optional[str]):
+    return pb.OrchestratorAction(id=id, sendEntityMessage=pb.SendEntityMessageAction(entityOperationCalled=pb.EntityOperationCalledEvent(
+        requestId=f"{parent_instance_id}:{id}",
+        operation=operation,
+        scheduledTime=None,
+        input=get_string_value(encoded_input),
+        parentInstanceId=get_string_value(parent_instance_id),
+        parentExecutionId=None,
+        targetInstanceId=get_string_value(str(entity_id)),
+    )))
+
+
+def new_signal_entity_action(id: int, entity_id: EntityInstanceId, operation: str, encoded_input: Optional[str]):
+    return pb.OrchestratorAction(id=id, sendEntityMessage=pb.SendEntityMessageAction(entityOperationSignaled=pb.EntityOperationSignaledEvent(
+        requestId=f"{entity_id}:{id}",
+        operation=operation,
+        scheduledTime=None,
+        input=get_string_value(encoded_input),
+        targetInstanceId=get_string_value(str(entity_id)),
+    )))
+
+
+def new_lock_entities_action(id: int, entity_message: pb.SendEntityMessageAction):
+    return pb.OrchestratorAction(id=id, sendEntityMessage=entity_message)
+
+
+def convert_to_entity_batch_request(req: pb.EntityRequest) -> tuple[pb.EntityBatchRequest, list[pb.OperationInfo]]:
+    batch_request = pb.EntityBatchRequest(entityState=req.entityState, instanceId=req.instanceId, operations=[])
+
+    operation_infos: list[pb.OperationInfo] = []
+
+    for op in req.operationRequests:
+        if op.HasField("entityOperationSignaled"):
+            batch_request.operations.append(pb.OperationRequest(requestId=op.entityOperationSignaled.requestId,
+                                                                operation=op.entityOperationSignaled.operation,
+                                                                input=op.entityOperationSignaled.input))
+            operation_infos.append(pb.OperationInfo(requestId=op.entityOperationSignaled.requestId,
+                                                    responseDestination=None))
+        elif op.HasField("entityOperationCalled"):
+            batch_request.operations.append(pb.OperationRequest(requestId=op.entityOperationCalled.requestId,
+                                                                operation=op.entityOperationCalled.operation,
+                                                                input=op.entityOperationCalled.input))
+            operation_infos.append(pb.OperationInfo(requestId=op.entityOperationCalled.requestId,
+                                                    responseDestination=pb.OrchestrationInstance(
+                                                        instanceId=op.entityOperationCalled.parentInstanceId.value,
+                                                        executionId=op.entityOperationCalled.parentExecutionId
+                                                    )))
+
+    return batch_request, operation_infos
+
+
 def new_timestamp(dt: datetime) -> timestamp_pb2.Timestamp:
     ts = timestamp_pb2.Timestamp()
     ts.FromDatetime(dt)
@@ -195,12 +256,14 @@ def new_timestamp(dt: datetime) -> timestamp_pb2.Timestamp:
 def new_create_sub_orchestration_action(
         id: int,
         name: str,
-        instance_id:
-        encoded_input:
+        instance_id: Optional[str],
+        encoded_input: Optional[str],
+        version: Optional[str]) -> pb.OrchestratorAction:
     return pb.OrchestratorAction(id=id, createSubOrchestration=pb.CreateSubOrchestrationAction(
         name=name,
        instanceId=instance_id,
-        input=get_string_value(encoded_input)
+        input=get_string_value(encoded_input),
+        version=get_string_value(version)
     ))
 
 
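For orientation, here is a minimal usage sketch of the new entity-action helpers (not part of the diff). The EntityInstanceId constructor arguments are an assumption; the diff only shows that an entity id is stringified with str(entity_id).

# Hypothetical usage sketch of the new helpers in durabletask/internal/helpers.py.
import durabletask.internal.helpers as helpers
from durabletask.entities import EntityInstanceId

entity_id = EntityInstanceId("counter", "player-1")  # assumed (entity, key) constructor

# Calling an entity from an orchestration yields a SendEntityMessageAction whose
# requestId combines the parent instance id and the action id, so the correlation
# key stays stable across orchestration replays.
call_action = helpers.new_call_entity_action(
    id=1,
    parent_instance_id="orchestration-abc",
    entity_id=entity_id,
    operation="add",
    encoded_input="5")
assert call_action.sendEntityMessage.entityOperationCalled.requestId == "orchestration-abc:1"

# A one-way signal carries no parent instance id; its requestId is keyed off the entity id instead.
signal_action = helpers.new_signal_entity_action(
    id=2, entity_id=entity_id, operation="reset", encoded_input=None)
assert signal_action.sendEntityMessage.entityOperationSignaled.targetInstanceId.value == str(entity_id)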
durabletask/internal/orchestration_entity_context.py
ADDED
@@ -0,0 +1,115 @@
+from datetime import datetime
+from typing import Generator, List, Optional, Tuple, Union
+
+from durabletask.internal.helpers import get_string_value
+import durabletask.internal.orchestrator_service_pb2 as pb
+from durabletask.entities import EntityInstanceId
+
+
+class OrchestrationEntityContext:
+    def __init__(self, instance_id: str):
+        self.instance_id = instance_id
+
+        self.lock_acquisition_pending = False
+
+        self.critical_section_id = None
+        self.critical_section_locks: list[EntityInstanceId] = []
+        self.available_locks: list[EntityInstanceId] = []
+
+    @property
+    def is_inside_critical_section(self) -> bool:
+        return self.critical_section_id is not None
+
+    def get_available_entities(self) -> Generator[EntityInstanceId, None, None]:
+        if self.is_inside_critical_section:
+            for available_lock in self.available_locks:
+                yield available_lock
+
+    def validate_suborchestration_transition(self) -> Tuple[bool, str]:
+        if self.is_inside_critical_section:
+            return False, "While holding locks, cannot call suborchestrators."
+        return True, ""
+
+    def validate_operation_transition(self, target_instance_id: EntityInstanceId, one_way: bool) -> Tuple[bool, str]:
+        if self.is_inside_critical_section:
+            lock_to_use = target_instance_id
+            if one_way:
+                if target_instance_id in self.critical_section_locks:
+                    return False, "Must not signal a locked entity from a critical section."
+            else:
+                try:
+                    self.available_locks.remove(lock_to_use)
+                except ValueError:
+                    if self.lock_acquisition_pending:
+                        return False, "Must await the completion of the lock request prior to calling any entity."
+                    if lock_to_use in self.critical_section_locks:
+                        return False, "Must not call an entity from a critical section while a prior call to the same entity is still pending."
+                    else:
+                        return False, "Must not call an entity from a critical section if it is not one of the locked entities."
+        return True, ""
+
+    def validate_acquire_transition(self) -> Tuple[bool, str]:
+        if self.is_inside_critical_section:
+            return False, "Must not enter another critical section from within a critical section."
+        return True, ""
+
+    def recover_lock_after_call(self, target_instance_id: EntityInstanceId):
+        if self.is_inside_critical_section:
+            self.available_locks.append(target_instance_id)
+
+    def emit_lock_release_messages(self):
+        if self.is_inside_critical_section:
+            for entity_id in self.critical_section_locks:
+                unlock_event = pb.SendEntityMessageAction(entityUnlockSent=pb.EntityUnlockSentEvent(
+                    criticalSectionId=self.critical_section_id,
+                    targetInstanceId=get_string_value(str(entity_id)),
+                    parentInstanceId=get_string_value(self.instance_id)
+                ))
+                yield unlock_event
+
+            self.critical_section_locks = []
+            self.available_locks = []
+            self.critical_section_id = None
+
+    def emit_request_message(self, target, operation_name: str, one_way: bool, operation_id: str,
+                             scheduled_time_utc: datetime, input: Optional[str],
+                             request_time: Optional[datetime] = None, create_trace: bool = False):
+        raise NotImplementedError()
+
+    def emit_acquire_message(self, critical_section_id: str, entities: List[EntityInstanceId]) -> Union[Tuple[None, None], Tuple[pb.SendEntityMessageAction, pb.OrchestrationInstance]]:
+        if not entities:
+            return None, None
+
+        # Acquire the locks in a globally fixed order to avoid deadlocks
+        # Also remove duplicates - this can be optimized for perf if necessary
+        entity_ids = sorted(entities)
+        entity_ids_dedup = []
+        for i, entity_id in enumerate(entity_ids):
+            if entity_id != entity_ids[i - 1] if i > 0 else True:
+                entity_ids_dedup.append(entity_id)
+
+        target = pb.OrchestrationInstance(instanceId=str(entity_ids_dedup[0]))
+        request = pb.SendEntityMessageAction(entityLockRequested=pb.EntityLockRequestedEvent(
+            criticalSectionId=critical_section_id,
+            parentInstanceId=get_string_value(self.instance_id),
+            lockSet=[str(eid) for eid in entity_ids_dedup],
+            position=0,
+        ))
+
+        self.critical_section_id = critical_section_id
+        self.critical_section_locks = entity_ids_dedup
+        self.lock_acquisition_pending = True
+
+        return request, target
+
+    def complete_acquire(self, critical_section_id):
+        if self.critical_section_id != critical_section_id:
+            raise RuntimeError(f"Unexpected lock acquire for critical section ID '{critical_section_id}' (expected '{self.critical_section_id}')")
+        self.available_locks = self.critical_section_locks
+        self.lock_acquisition_pending = False
+
+    def adjust_outgoing_message(self, instance_id: str, request_message, capped_time: datetime) -> str:
+        raise NotImplementedError()
+
+    def deserialize_entity_response_event(self, event_content: str):
+        raise NotImplementedError()