durabletask 0.0.0.dev65__tar.gz → 0.0.0.dev67__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34) hide show
  1. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/PKG-INFO +1 -1
  2. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask/client.py +171 -11
  3. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask/entities/__init__.py +3 -1
  4. durabletask-0.0.0.dev67/durabletask/entities/entity_instance_id.py +84 -0
  5. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask/entities/entity_metadata.py +9 -5
  6. durabletask-0.0.0.dev67/durabletask/entities/entity_operation_failed_exception.py +15 -0
  7. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask/internal/helpers.py +7 -0
  8. durabletask-0.0.0.dev67/durabletask/internal/json_encode_output_exception.py +12 -0
  9. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask/internal/shared.py +8 -5
  10. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask/worker.py +57 -20
  11. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask.egg-info/PKG-INFO +1 -1
  12. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask.egg-info/SOURCES.txt +2 -0
  13. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/pyproject.toml +1 -1
  14. durabletask-0.0.0.dev65/durabletask/entities/entity_instance_id.py +0 -42
  15. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/LICENSE +0 -0
  16. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/README.md +0 -0
  17. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask/__init__.py +0 -0
  18. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask/entities/durable_entity.py +0 -0
  19. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask/entities/entity_context.py +0 -0
  20. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask/entities/entity_lock.py +0 -0
  21. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask/internal/entity_state_shim.py +0 -0
  22. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask/internal/exceptions.py +0 -0
  23. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask/internal/grpc_interceptor.py +0 -0
  24. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask/internal/orchestration_entity_context.py +0 -0
  25. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask/internal/orchestrator_service_pb2.py +0 -0
  26. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask/internal/orchestrator_service_pb2.pyi +0 -0
  27. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask/internal/orchestrator_service_pb2_grpc.py +0 -0
  28. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask/internal/proto_task_hub_sidecar_service_stub.py +0 -0
  29. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask/py.typed +0 -0
  30. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask/task.py +0 -0
  31. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask.egg-info/dependency_links.txt +0 -0
  32. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask.egg-info/requires.txt +0 -0
  33. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/durabletask.egg-info/top_level.txt +0 -0
  34. {durabletask-0.0.0.dev65 → durabletask-0.0.0.dev67}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: durabletask
3
- Version: 0.0.0.dev65
3
+ Version: 0.0.0.dev67
4
4
  Summary: A Durable Task Client SDK for Python
5
5
  License: MIT License
6
6
 
@@ -6,10 +6,9 @@ import uuid
6
6
  from dataclasses import dataclass
7
7
  from datetime import datetime, timezone
8
8
  from enum import Enum
9
- from typing import Any, Optional, Sequence, TypeVar, Union
9
+ from typing import Any, List, Optional, Sequence, TypeVar, Union
10
10
 
11
11
  import grpc
12
- from google.protobuf import wrappers_pb2
13
12
 
14
13
  from durabletask.entities import EntityInstanceId
15
14
  from durabletask.entities.entity_metadata import EntityMetadata
@@ -57,6 +56,39 @@ class OrchestrationState:
57
56
  self.failure_details)
58
57
 
59
58
 
59
@dataclass
class OrchestrationQuery:
    """Filter criteria for querying orchestration instance states.

    Passed to TaskHubGrpcClient.get_all_orchestration_states; a default-constructed
    query matches all instances.
    """
    # Only match instances created at or after this time (None = no lower bound).
    created_time_from: Optional[datetime] = None
    # Only match instances created at or before this time (None = no upper bound).
    created_time_to: Optional[datetime] = None
    # Restrict results to these runtime statuses (None = all statuses).
    runtime_status: Optional[List[OrchestrationStatus]] = None
    # Some backends don't respond well with max_instance_count = None, so we use the integer limit for non-paginated
    # results instead.
    max_instance_count: Optional[int] = (1 << 31) - 1
    # When True, serialized inputs and outputs are also fetched for each instance.
    fetch_inputs_and_outputs: bool = False
68
+
69
+
70
@dataclass
class EntityQuery:
    """Filter criteria for querying entity metadata.

    Passed to TaskHubGrpcClient.get_all_entities; a default-constructed query
    matches all entities.
    """
    # Only match entities whose instance ID starts with this prefix (None = no prefix filter).
    instance_id_starts_with: Optional[str] = None
    # Only match entities last modified at or after this time (None = no lower bound).
    last_modified_from: Optional[datetime] = None
    # Only match entities last modified at or before this time (None = no upper bound).
    last_modified_to: Optional[datetime] = None
    # When True, each result includes the entity's serialized state.
    include_state: bool = True
    # When True, transient (e.g. not-yet-materialized) entities are included.
    include_transient: bool = False
    # Server-side page size per query round-trip (None = backend default).
    page_size: Optional[int] = None
78
+
79
+
80
@dataclass
class PurgeInstancesResult:
    """Outcome of a purge operation (purge_orchestration / purge_orchestrations_by)."""
    # Number of orchestration instances that were deleted.
    deleted_instance_count: int
    # Whether the purge finished; False indicates more instances remain to purge.
    is_complete: bool
84
+
85
+
86
@dataclass
class CleanEntityStorageResult:
    """Aggregated outcome of TaskHubGrpcClient.clean_entity_storage across all pages."""
    # Number of entities with no state that were removed.
    empty_entities_removed: int
    # Number of orphaned entity locks that were released.
    orphaned_locks_released: int
90
+
91
+
60
92
  class OrchestrationFailedError(Exception):
61
93
  def __init__(self, message: str, failure_details: task.FailureDetails):
62
94
  super().__init__(message)
@@ -73,6 +105,12 @@ def new_orchestration_state(instance_id: str, res: pb.GetInstanceResponse) -> Op
73
105
 
74
106
  state = res.orchestrationState
75
107
 
108
+ new_state = parse_orchestration_state(state)
109
+ new_state.instance_id = instance_id # Override instance_id with the one from the request, to match old behavior
110
+ return new_state
111
+
112
+
113
+ def parse_orchestration_state(state: pb.OrchestrationState) -> OrchestrationState:
76
114
  failure_details = None
77
115
  if state.failureDetails.errorMessage != '' or state.failureDetails.errorType != '':
78
116
  failure_details = task.FailureDetails(
@@ -81,7 +119,7 @@ def new_orchestration_state(instance_id: str, res: pb.GetInstanceResponse) -> Op
81
119
  state.failureDetails.stackTrace.value if not helpers.is_empty(state.failureDetails.stackTrace) else None)
82
120
 
83
121
  return OrchestrationState(
84
- instance_id,
122
+ state.instanceId,
85
123
  state.name,
86
124
  OrchestrationStatus(state.orchestrationStatus),
87
125
  state.createdTimestamp.ToDatetime(),
@@ -93,7 +131,6 @@ def new_orchestration_state(instance_id: str, res: pb.GetInstanceResponse) -> Op
93
131
 
94
132
 
95
133
  class TaskHubGrpcClient:
96
-
97
134
  def __init__(self, *,
98
135
  host_address: Optional[str] = None,
99
136
  metadata: Optional[list[tuple[str, str]]] = None,
@@ -136,7 +173,7 @@ class TaskHubGrpcClient:
136
173
  req = pb.CreateInstanceRequest(
137
174
  name=name,
138
175
  instanceId=instance_id if instance_id else uuid.uuid4().hex,
139
- input=wrappers_pb2.StringValue(value=shared.to_json(input)) if input is not None else None,
176
+ input=helpers.get_string_value(shared.to_json(input) if input is not None else None),
140
177
  scheduledStartTimestamp=helpers.new_timestamp(start_at) if start_at else None,
141
178
  version=helpers.get_string_value(version if version else self.default_version),
142
179
  orchestrationIdReusePolicy=reuse_id_policy,
@@ -152,6 +189,42 @@ class TaskHubGrpcClient:
152
189
  res: pb.GetInstanceResponse = self._stub.GetInstance(req)
153
190
  return new_orchestration_state(req.instanceId, res)
154
191
 
192
    def get_all_orchestration_states(self,
                                     orchestration_query: Optional[OrchestrationQuery] = None
                                     ) -> List[OrchestrationState]:
        """Query all orchestration instance states matching the given filter.

        Follows server continuation tokens and accumulates every page into a
        single list before returning.

        Parameters
        ----------
        orchestration_query : Optional[OrchestrationQuery]
            Filter criteria; None matches all instances (a default query is used).

        Returns
        -------
        List[OrchestrationState]
            All matching orchestration states.
        """
        if orchestration_query is None:
            orchestration_query = OrchestrationQuery()
        _continuation_token = None

        self._logger.info(f"Querying orchestration instances with query: {orchestration_query}")

        states = []

        while True:
            # Rebuild the request each iteration so the latest continuation token is included.
            req = pb.QueryInstancesRequest(
                query=pb.InstanceQuery(
                    runtimeStatus=[status.value for status in orchestration_query.runtime_status] if orchestration_query.runtime_status else None,
                    createdTimeFrom=helpers.new_timestamp(orchestration_query.created_time_from) if orchestration_query.created_time_from else None,
                    createdTimeTo=helpers.new_timestamp(orchestration_query.created_time_to) if orchestration_query.created_time_to else None,
                    maxInstanceCount=orchestration_query.max_instance_count,
                    fetchInputsAndOutputs=orchestration_query.fetch_inputs_and_outputs,
                    continuationToken=_continuation_token
                )
            )
            resp: pb.QueryInstancesResponse = self._stub.QueryInstances(req)
            states += [parse_orchestration_state(res) for res in resp.orchestrationState]
            # Check the value for continuationToken - none or "0" indicates that there are no more results.
            if resp.continuationToken and resp.continuationToken.value and resp.continuationToken.value != "0":
                self._logger.info(f"Received continuation token with value {resp.continuationToken.value}, fetching next list of instances...")
                # Guard against a backend that keeps returning the same token forever.
                if _continuation_token and _continuation_token.value and _continuation_token.value == resp.continuationToken.value:
                    self._logger.warning(f"Received the same continuation token value {resp.continuationToken.value} again, stopping to avoid infinite loop.")
                    break
                _continuation_token = resp.continuationToken
            else:
                break

        return states
227
+
155
228
  def wait_for_orchestration_start(self, instance_id: str, *,
156
229
  fetch_payloads: bool = False,
157
230
  timeout: int = 60) -> Optional[OrchestrationState]:
@@ -199,7 +272,8 @@ class TaskHubGrpcClient:
199
272
  req = pb.RaiseEventRequest(
200
273
  instanceId=instance_id,
201
274
  name=event_name,
202
- input=wrappers_pb2.StringValue(value=shared.to_json(data)) if data is not None else None)
275
+ input=helpers.get_string_value(shared.to_json(data) if data is not None else None)
276
+ )
203
277
 
204
278
  self._logger.info(f"Raising event '{event_name}' for instance '{instance_id}'.")
205
279
  self._stub.RaiseEvent(req)
@@ -209,7 +283,7 @@ class TaskHubGrpcClient:
209
283
  recursive: bool = True):
210
284
  req = pb.TerminateRequest(
211
285
  instanceId=instance_id,
212
- output=wrappers_pb2.StringValue(value=shared.to_json(output)) if output is not None else None,
286
+ output=helpers.get_string_value(shared.to_json(output) if output is not None else None),
213
287
  recursive=recursive)
214
288
 
215
289
  self._logger.info(f"Terminating instance '{instance_id}'.")
@@ -225,10 +299,31 @@ class TaskHubGrpcClient:
225
299
  self._logger.info(f"Resuming instance '{instance_id}'.")
226
300
  self._stub.ResumeInstance(req)
227
301
 
228
    def purge_orchestration(self, instance_id: str, recursive: bool = True) -> PurgeInstancesResult:
        """Purge the state of a single orchestration instance.

        Parameters
        ----------
        instance_id : str
            ID of the instance to purge.
        recursive : bool
            When True, sub-orchestrations are purged as well.

        Returns
        -------
        PurgeInstancesResult
            Count of deleted instances and whether the purge completed.
        """
        req = pb.PurgeInstancesRequest(instanceId=instance_id, recursive=recursive)
        self._logger.info(f"Purging instance '{instance_id}'.")
        resp: pb.PurgeInstancesResponse = self._stub.PurgeInstances(req)
        return PurgeInstancesResult(resp.deletedInstanceCount, resp.isComplete.value)
307
+
308
    def purge_orchestrations_by(self,
                                created_time_from: Optional[datetime] = None,
                                created_time_to: Optional[datetime] = None,
                                runtime_status: Optional[List[OrchestrationStatus]] = None,
                                recursive: bool = False) -> PurgeInstancesResult:
        """Purge all orchestration instances matching a creation-time / status filter.

        Parameters
        ----------
        created_time_from : Optional[datetime]
            Only purge instances created at or after this time.
        created_time_to : Optional[datetime]
            Only purge instances created at or before this time.
        runtime_status : Optional[List[OrchestrationStatus]]
            Only purge instances in one of these runtime statuses.
        recursive : bool
            When True, sub-orchestrations are purged as well.

        Returns
        -------
        PurgeInstancesResult
            Count of deleted instances and whether the purge completed.
        """
        self._logger.info("Purging orchestrations by filter: "
                          f"created_time_from={created_time_from}, "
                          f"created_time_to={created_time_to}, "
                          f"runtime_status={[str(status) for status in runtime_status] if runtime_status else None}, "
                          f"recursive={recursive}")
        resp: pb.PurgeInstancesResponse = self._stub.PurgeInstances(pb.PurgeInstancesRequest(
            purgeInstanceFilter=pb.PurgeInstanceFilter(
                createdTimeFrom=helpers.new_timestamp(created_time_from) if created_time_from else None,
                createdTimeTo=helpers.new_timestamp(created_time_to) if created_time_to else None,
                runtimeStatus=[status.value for status in runtime_status] if runtime_status else None
            ),
            recursive=recursive
        ))
        return PurgeInstancesResult(resp.deletedInstanceCount, resp.isComplete.value)
232
327
 
233
328
  def signal_entity(self,
234
329
  entity_instance_id: EntityInstanceId,
@@ -237,7 +332,7 @@ class TaskHubGrpcClient:
237
332
  req = pb.SignalEntityRequest(
238
333
  instanceId=str(entity_instance_id),
239
334
  name=operation_name,
240
- input=wrappers_pb2.StringValue(value=shared.to_json(input)) if input is not None else None,
335
+ input=helpers.get_string_value(shared.to_json(input) if input is not None else None),
241
336
  requestId=str(uuid.uuid4()),
242
337
  scheduledTime=None,
243
338
  parentTraceContext=None,
@@ -256,4 +351,69 @@ class TaskHubGrpcClient:
256
351
  if not res.exists:
257
352
  return None
258
353
 
259
- return EntityMetadata.from_entity_response(res, include_state)
354
+ return EntityMetadata.from_entity_metadata(res.entity, include_state)
355
+
356
    def get_all_entities(self,
                         entity_query: Optional[EntityQuery] = None) -> List[EntityMetadata]:
        """Query metadata for all entities matching the given filter.

        Follows server continuation tokens and accumulates every page into a
        single list before returning.

        Parameters
        ----------
        entity_query : Optional[EntityQuery]
            Filter criteria; None matches all entities (a default query is used).

        Returns
        -------
        List[EntityMetadata]
            Metadata for all matching entities.
        """
        if entity_query is None:
            entity_query = EntityQuery()
        _continuation_token = None

        self._logger.info(f"Retrieving entities by filter: {entity_query}")

        entities = []

        while True:
            # Rebuild the request each iteration so the latest continuation token is included.
            query_request = pb.QueryEntitiesRequest(
                query=pb.EntityQuery(
                    instanceIdStartsWith=helpers.get_string_value(entity_query.instance_id_starts_with),
                    lastModifiedFrom=helpers.new_timestamp(entity_query.last_modified_from) if entity_query.last_modified_from else None,
                    lastModifiedTo=helpers.new_timestamp(entity_query.last_modified_to) if entity_query.last_modified_to else None,
                    includeState=entity_query.include_state,
                    includeTransient=entity_query.include_transient,
                    pageSize=helpers.get_int_value(entity_query.page_size),
                    continuationToken=_continuation_token
                )
            )
            resp: pb.QueryEntitiesResponse = self._stub.QueryEntities(query_request)
            entities += [EntityMetadata.from_entity_metadata(entity, query_request.query.includeState) for entity in resp.entities]
            # A missing/empty/"0" continuation token means there are no more pages.
            if resp.continuationToken and resp.continuationToken.value and resp.continuationToken.value != "0":
                self._logger.info(f"Received continuation token with value {resp.continuationToken.value}, fetching next page of entities...")
                # Guard against a backend that keeps returning the same token forever.
                if _continuation_token and _continuation_token.value and _continuation_token.value == resp.continuationToken.value:
                    self._logger.warning(f"Received the same continuation token value {resp.continuationToken.value} again, stopping to avoid infinite loop.")
                    break
                _continuation_token = resp.continuationToken
            else:
                break
        return entities
389
+
390
    def clean_entity_storage(self,
                             remove_empty_entities: bool = True,
                             release_orphaned_locks: bool = True
                             ) -> CleanEntityStorageResult:
        """Clean up entity storage, removing empty entities and/or releasing orphaned locks.

        Follows server continuation tokens and aggregates the per-page counts.

        Parameters
        ----------
        remove_empty_entities : bool
            When True, entities without state are removed.
        release_orphaned_locks : bool
            When True, locks whose holders are gone are released.

        Returns
        -------
        CleanEntityStorageResult
            Aggregated counts of removed entities and released locks.
        """
        self._logger.info("Cleaning entity storage")

        empty_entities_removed = 0
        orphaned_locks_released = 0
        _continuation_token = None

        while True:
            req = pb.CleanEntityStorageRequest(
                removeEmptyEntities=remove_empty_entities,
                releaseOrphanedLocks=release_orphaned_locks,
                continuationToken=_continuation_token
            )
            resp: pb.CleanEntityStorageResponse = self._stub.CleanEntityStorage(req)
            empty_entities_removed += resp.emptyEntitiesRemoved
            orphaned_locks_released += resp.orphanedLocksReleased

            # A missing/empty/"0" continuation token means there are no more pages.
            if resp.continuationToken and resp.continuationToken.value and resp.continuationToken.value != "0":
                self._logger.info(f"Received continuation token with value {resp.continuationToken.value}, cleaning next page...")
                # Guard against a backend that keeps returning the same token forever.
                if _continuation_token and _continuation_token.value and _continuation_token.value == resp.continuationToken.value:
                    self._logger.warning(f"Received the same continuation token value {resp.continuationToken.value} again, stopping to avoid infinite loop.")
                    break
                _continuation_token = resp.continuationToken
            else:
                break

        return CleanEntityStorageResult(empty_entities_removed, orphaned_locks_released)
@@ -8,7 +8,9 @@ from durabletask.entities.durable_entity import DurableEntity
8
8
  from durabletask.entities.entity_lock import EntityLock
9
9
  from durabletask.entities.entity_context import EntityContext
10
10
  from durabletask.entities.entity_metadata import EntityMetadata
11
+ from durabletask.entities.entity_operation_failed_exception import EntityOperationFailedException
11
12
 
12
- __all__ = ["EntityInstanceId", "DurableEntity", "EntityLock", "EntityContext", "EntityMetadata"]
13
+ __all__ = ["EntityInstanceId", "DurableEntity", "EntityLock", "EntityContext", "EntityMetadata",
14
+ "EntityOperationFailedException"]
13
15
 
14
16
  PACKAGE_NAME = "durabletask.entities"
@@ -0,0 +1,84 @@
1
class EntityInstanceId:
    """Identifies a durable entity instance by entity name and key.

    The canonical string form is ``@<entity>@<key>``; the entity name is
    normalized to lower case on construction. Instances compare by value,
    order by their canonical string form, and are hashable.
    """

    def __init__(self, entity: str, key: str):
        EntityInstanceId.validate_entity_name(entity)
        EntityInstanceId.validate_key(key)
        self.entity = entity.lower()
        self.key = key

    def __str__(self) -> str:
        return f"@{self.entity}@{self.key}"

    def __repr__(self) -> str:
        return f"EntityInstanceId(entity={self.entity!r}, key={self.key!r})"

    def __eq__(self, other):
        if not isinstance(other, EntityInstanceId):
            return False
        return self.entity == other.entity and self.key == other.key

    def __hash__(self):
        # Defining __eq__ implicitly sets __hash__ to None; restore hashability
        # (dict keys / set membership) consistently with __eq__.
        return hash((self.entity, self.key))

    def __lt__(self, other):
        # Bug fix: the original returned `self < other` for foreign types, which
        # recursed infinitely (RecursionError). Returning NotImplemented lets
        # Python fall back to the other operand or raise a clean TypeError.
        if not isinstance(other, EntityInstanceId):
            return NotImplemented
        return str(self) < str(other)

    @staticmethod
    def parse(entity_id: str) -> "EntityInstanceId":
        """Parse a string representation of an entity ID into an EntityInstanceId object.

        Parameters
        ----------
        entity_id : str
            The string representation of the entity ID, in the format '@entity@key'.

        Returns
        -------
        EntityInstanceId
            The parsed EntityInstanceId object.

        Raises
        ------
        ValueError
            If the input string is not in the correct format.
        """
        if not entity_id.startswith("@"):
            raise ValueError("Entity ID must start with '@'.")
        try:
            _, entity, key = entity_id.split("@", 2)
        except ValueError as ex:
            raise ValueError(f"Invalid entity ID format: {entity_id}") from ex
        return EntityInstanceId(entity=entity, key=key)

    @staticmethod
    def validate_entity_name(name: str) -> None:
        """Validate that the entity name does not contain invalid characters.

        Parameters
        ----------
        name : str
            The entity name to validate.

        Raises
        ------
        ValueError
            If the name is not a valid entity name.
        """
        if not name:
            raise ValueError("Entity name cannot be empty.")
        if "@" in name:
            raise ValueError("Entity name cannot contain '@' symbol.")

    @staticmethod
    def validate_key(key: str) -> None:
        """Validate that the entity key does not contain invalid characters.

        Parameters
        ----------
        key : str
            The entity key to validate.

        Raises
        ------
        ValueError
            If the key is not a valid entity key.
        """
        if not key:
            raise ValueError("Entity key cannot be empty.")
        if "@" in key:
            raise ValueError("Entity key cannot contain '@' symbol.")
@@ -44,18 +44,22 @@ class EntityMetadata:
44
44
 
45
45
    @staticmethod
    def from_entity_response(entity_response: pb.GetEntityResponse, includes_state: bool):
        """Build an EntityMetadata from a GetEntityResponse by delegating to from_entity_metadata."""
        return EntityMetadata.from_entity_metadata(entity_response.entity, includes_state)
48
+
49
    @staticmethod
    def from_entity_metadata(entity: pb.EntityMetadata, includes_state: bool):
        """Build an EntityMetadata from a protobuf EntityMetadata message.

        Parameters
        ----------
        entity : pb.EntityMetadata
            The protobuf entity metadata message.
        includes_state : bool
            Whether the message carries the entity's serialized state; when True,
            the state is copied into the result.

        Raises
        ------
        ValueError
            If the message's instance ID is not a valid entity instance ID.
        """
        try:
            entity_id = EntityInstanceId.parse(entity.instanceId)
        except ValueError:
            raise ValueError("Invalid entity instance ID in entity response.")
        entity_state = None
        if includes_state:
            entity_state = entity.serializedState.value
        return EntityMetadata(
            id=entity_id,
            last_modified=entity.lastModifiedTime.ToDatetime(timezone.utc),
            backlog_queue_size=entity.backlogQueueSize,
            locked_by=entity.lockedBy.value,
            includes_state=includes_state,
            state=entity_state
        )
@@ -0,0 +1,15 @@
1
+ from durabletask.internal.orchestrator_service_pb2 import TaskFailureDetails
2
+ from durabletask.entities.entity_instance_id import EntityInstanceId
3
+
4
+
5
class EntityOperationFailedException(Exception):
    """Raised when an operation invoked on an Entity Function fails.

    Carries the target entity's instance ID, the name of the failed
    operation, and the failure details reported by the worker.
    """

    def __init__(self, entity_instance_id: EntityInstanceId, operation_name: str, failure_details: TaskFailureDetails) -> None:
        super().__init__()
        self.failure_details = failure_details
        self.operation_name = operation_name
        self.entity_instance_id = entity_instance_id

    def __str__(self) -> str:
        return (
            f"Operation '{self.operation_name}' on entity '{self.entity_instance_id}' "
            f"failed with error: {self.failure_details.errorMessage}"
        )
@@ -184,6 +184,13 @@ def get_string_value(val: Optional[str]) -> Optional[wrappers_pb2.StringValue]:
184
184
  return wrappers_pb2.StringValue(value=val)
185
185
 
186
186
 
187
def get_int_value(val: Optional[int]) -> Optional[wrappers_pb2.Int32Value]:
    """Wrap an optional int in a protobuf Int32Value, passing None through unchanged."""
    return None if val is None else wrappers_pb2.Int32Value(value=val)
192
+
193
+
187
194
  def get_string_value_or_empty(val: Optional[str]) -> wrappers_pb2.StringValue:
188
195
  if val is None:
189
196
  return wrappers_pb2.StringValue(value="")
@@ -0,0 +1,12 @@
1
+ from typing import Any
2
+
3
+
4
class JsonEncodeOutputException(Exception):
    """Custom exception type used to indicate that an orchestration result could not be JSON-encoded.

    The offending object is retained on the exception for diagnostics.
    """

    def __init__(self, problem_object: Any):
        super().__init__()
        # Keep a reference to the value that failed to serialize.
        self.problem_object = problem_object

    def __str__(self) -> str:
        return "The orchestration result could not be encoded. Object details: " + str(self.problem_object)
@@ -103,15 +103,18 @@ class InternalJSONEncoder(json.JSONEncoder):
103
103
  return super().encode(obj)
104
104
 
105
105
  def default(self, obj):
106
- if dataclasses.is_dataclass(obj):
106
+ if dataclasses.is_dataclass(obj) and not isinstance(obj, type):
107
107
  # Dataclasses are not serializable by default, so we convert them to a dict and mark them for
108
- # automatic deserialization by the receiver
109
- d = dataclasses.asdict(obj) # type: ignore
108
+ # automatic deserialization by the receiver. We use a shallow field extraction instead of
109
+ # dataclasses.asdict() so that nested dataclass values are re-processed by the encoder
110
+ # individually (each receiving their own AUTO_SERIALIZED marker).
111
+ d = {f.name: getattr(obj, f.name) for f in dataclasses.fields(obj)}
110
112
  d[AUTO_SERIALIZED] = True
111
113
  return d
112
114
  elif isinstance(obj, SimpleNamespace):
113
- # Most commonly used for serializing custom objects that were previously serialized using our encoder
114
- d = vars(obj)
115
+ # Most commonly used for serializing custom objects that were previously serialized using our encoder.
116
+ # Copy the dict to avoid mutating the original object.
117
+ d = dict(vars(obj))
115
118
  d[AUTO_SERIALIZED] = True
116
119
  return d
117
120
  # This will typically raise a TypeError
@@ -19,10 +19,12 @@ from packaging.version import InvalidVersion, parse
19
19
  import grpc
20
20
  from google.protobuf import empty_pb2
21
21
 
22
+ from durabletask.entities.entity_operation_failed_exception import EntityOperationFailedException
22
23
  from durabletask.internal import helpers
23
24
  from durabletask.internal.entity_state_shim import StateShim
24
25
  from durabletask.internal.helpers import new_timestamp
25
26
  from durabletask.entities import DurableEntity, EntityLock, EntityInstanceId, EntityContext
27
+ from durabletask.internal.json_encode_output_exception import JsonEncodeOutputException
26
28
  from durabletask.internal.orchestration_entity_context import OrchestrationEntityContext
27
29
  from durabletask.internal.proto_task_hub_sidecar_service_stub import ProtoTaskHubSidecarServiceStub
28
30
  import durabletask.internal.helpers as ph
@@ -141,14 +143,12 @@ class _Registry:
141
143
  orchestrators: dict[str, task.Orchestrator]
142
144
  activities: dict[str, task.Activity]
143
145
  entities: dict[str, task.Entity]
144
- entity_instances: dict[str, DurableEntity]
145
146
  versioning: Optional[VersioningOptions] = None
146
147
 
147
148
  def __init__(self):
148
149
  self.orchestrators = {}
149
150
  self.activities = {}
150
151
  self.entities = {}
151
- self.entity_instances = {}
152
152
 
153
153
  def add_orchestrator(self, fn: task.Orchestrator[TInput, TOutput]) -> str:
154
154
  if fn is None:
@@ -199,8 +199,8 @@ class _Registry:
199
199
  return name
200
200
 
201
201
  def add_named_entity(self, name: str, fn: task.Entity) -> None:
202
- if not name:
203
- raise ValueError("A non-empty entity name is required.")
202
+ name = name.lower()
203
+ EntityInstanceId.validate_entity_name(name)
204
204
  if name in self.entities:
205
205
  raise ValueError(f"A '{name}' entity already exists.")
206
206
 
@@ -829,7 +829,7 @@ class _RuntimeOrchestrationContext(task.OrchestrationContext):
829
829
  self._pending_actions: dict[int, pb.OrchestratorAction] = {}
830
830
  self._pending_tasks: dict[int, task.CompletableTask] = {}
831
831
  # Maps entity ID to task ID
832
- self._entity_task_id_map: dict[str, tuple[EntityInstanceId, int]] = {}
832
+ self._entity_task_id_map: dict[str, tuple[EntityInstanceId, str, int]] = {}
833
833
  self._entity_lock_task_id_map: dict[str, tuple[EntityInstanceId, int]] = {}
834
834
  # Maps criticalSectionId to task ID
835
835
  self._entity_lock_id_map: dict[str, int] = {}
@@ -902,7 +902,13 @@ class _RuntimeOrchestrationContext(task.OrchestrationContext):
902
902
  self._result = result
903
903
  result_json: Optional[str] = None
904
904
  if result is not None:
905
- result_json = result if is_result_encoded else shared.to_json(result)
905
+ try:
906
+ result_json = result if is_result_encoded else shared.to_json(result)
907
+ except (ValueError, TypeError):
908
+ self._is_complete = False
909
+ self._result = None
910
+ self.set_failed(JsonEncodeOutputException(result))
911
+ return
906
912
  action = ph.new_complete_orchestration_action(
907
913
  self.next_sequence_number(), status, result_json
908
914
  )
@@ -1606,7 +1612,7 @@ class _OrchestrationExecutor:
1606
1612
  raise TypeError("Unexpected sub-orchestration task type")
1607
1613
  elif event.HasField("eventRaised"):
1608
1614
  if event.eventRaised.name in ctx._entity_task_id_map:
1609
- entity_id, task_id = ctx._entity_task_id_map.get(event.eventRaised.name, (None, None))
1615
+ entity_id, operation, task_id = ctx._entity_task_id_map.get(event.eventRaised.name, (None, None, None))
1610
1616
  self._handle_entity_event_raised(ctx, event, entity_id, task_id, False)
1611
1617
  elif event.eventRaised.name in ctx._entity_lock_task_id_map:
1612
1618
  entity_id, task_id = ctx._entity_lock_task_id_map.get(event.eventRaised.name, (None, None))
@@ -1680,9 +1686,10 @@ class _OrchestrationExecutor:
1680
1686
  )
1681
1687
  try:
1682
1688
  entity_id = EntityInstanceId.parse(event.entityOperationCalled.targetInstanceId.value)
1689
+ operation = event.entityOperationCalled.operation
1683
1690
  except ValueError:
1684
1691
  raise RuntimeError(f"Could not parse entity ID from targetInstanceId '{event.entityOperationCalled.targetInstanceId.value}'")
1685
- ctx._entity_task_id_map[event.entityOperationCalled.requestId] = (entity_id, entity_call_id)
1692
+ ctx._entity_task_id_map[event.entityOperationCalled.requestId] = (entity_id, operation, entity_call_id)
1686
1693
  elif event.HasField("entityOperationSignaled"):
1687
1694
  # This history event confirms that the entity signal was successfully scheduled.
1688
1695
  # Remove the entityOperationSignaled event from the pending action list so we don't schedule it
@@ -1743,7 +1750,7 @@ class _OrchestrationExecutor:
1743
1750
  ctx.resume()
1744
1751
  elif event.HasField("entityOperationCompleted"):
1745
1752
  request_id = event.entityOperationCompleted.requestId
1746
- entity_id, task_id = ctx._entity_task_id_map.pop(request_id, (None, None))
1753
+ entity_id, operation, task_id = ctx._entity_task_id_map.pop(request_id, (None, None, None))
1747
1754
  if not entity_id:
1748
1755
  raise RuntimeError(f"Could not parse entity ID from request ID '{request_id}'")
1749
1756
  if not task_id:
@@ -1762,10 +1769,29 @@ class _OrchestrationExecutor:
1762
1769
  entity_task.complete(result)
1763
1770
  ctx.resume()
1764
1771
  elif event.HasField("entityOperationFailed"):
1765
- if not ctx.is_replaying:
1766
- self._logger.info(f"{ctx.instance_id}: Entity operation failed.")
1767
- self._logger.info(f"Data: {json.dumps(event.entityOperationFailed)}")
1768
- pass
1772
+ request_id = event.entityOperationFailed.requestId
1773
+ entity_id, operation, task_id = ctx._entity_task_id_map.pop(request_id, (None, None, None))
1774
+ if not entity_id:
1775
+ raise RuntimeError(f"Could not parse entity ID from request ID '{request_id}'")
1776
+ if operation is None:
1777
+ raise RuntimeError(f"Could not parse operation name from request ID '{request_id}'")
1778
+ if not task_id:
1779
+ raise RuntimeError(f"Could not find matching task ID for entity operation with request ID '{request_id}'")
1780
+ entity_task = ctx._pending_tasks.pop(task_id, None)
1781
+ if not entity_task:
1782
+ if not ctx.is_replaying:
1783
+ self._logger.warning(
1784
+ f"{ctx.instance_id}: Ignoring unexpected entityOperationFailed event with request ID = {request_id}."
1785
+ )
1786
+ return
1787
+ failure = EntityOperationFailedException(
1788
+ entity_id,
1789
+ operation,
1790
+ event.entityOperationFailed.failureDetails
1791
+ )
1792
+ ctx._entity_context.recover_lock_after_call(entity_id)
1793
+ entity_task.fail(str(failure), failure)
1794
+ ctx.resume()
1769
1795
  elif event.HasField("orchestratorCompleted"):
1770
1796
  # Added in Functions only (for some reason) and does not affect orchestrator flow
1771
1797
  pass
@@ -1777,7 +1803,7 @@ class _OrchestrationExecutor:
1777
1803
  if action and action.HasField("sendEntityMessage"):
1778
1804
  if action.sendEntityMessage.HasField("entityOperationCalled"):
1779
1805
  entity_id, event_id = self._parse_entity_event_sent_input(event)
1780
- ctx._entity_task_id_map[event_id] = (entity_id, event.eventId)
1806
+ ctx._entity_task_id_map[event_id] = (entity_id, action.sendEntityMessage.entityOperationCalled.operation, event.eventId)
1781
1807
  elif action.sendEntityMessage.HasField("entityLockRequested"):
1782
1808
  entity_id, event_id = self._parse_entity_event_sent_input(event)
1783
1809
  ctx._entity_lock_task_id_map[event_id] = (entity_id, event.eventId)
@@ -1913,6 +1939,7 @@ class _EntityExecutor:
1913
1939
  def __init__(self, registry: _Registry, logger: logging.Logger):
1914
1940
  self._registry = registry
1915
1941
  self._logger = logger
1942
+ self._entity_method_cache: dict[tuple[type, str], bool] = {}
1916
1943
 
1917
1944
  def execute(
1918
1945
  self,
@@ -1936,11 +1963,7 @@ class _EntityExecutor:
1936
1963
  ctx = EntityContext(orchestration_id, operation, state, entity_id)
1937
1964
 
1938
1965
  if isinstance(fn, type) and issubclass(fn, DurableEntity):
1939
- if self._registry.entity_instances.get(str(entity_id), None):
1940
- entity_instance = self._registry.entity_instances[str(entity_id)]
1941
- else:
1942
- entity_instance = fn()
1943
- self._registry.entity_instances[str(entity_id)] = entity_instance
1966
+ entity_instance = fn()
1944
1967
  if not hasattr(entity_instance, operation):
1945
1968
  raise AttributeError(f"Entity '{entity_id}' does not have operation '{operation}'")
1946
1969
  method = getattr(entity_instance, operation)
@@ -1948,7 +1971,21 @@ class _EntityExecutor:
1948
1971
  raise TypeError(f"Entity operation '{operation}' is not callable")
1949
1972
  # Execute the entity method
1950
1973
  entity_instance._initialize_entity_context(ctx)
1951
- entity_output = method(entity_input)
1974
+ cache_key = (type(entity_instance), operation)
1975
+ has_required_param = self._entity_method_cache.get(cache_key)
1976
+ if has_required_param is None:
1977
+ sig = inspect.signature(method)
1978
+ has_required_param = any(
1979
+ p.default == inspect.Parameter.empty
1980
+ for p in sig.parameters.values()
1981
+ if p.kind not in (inspect.Parameter.VAR_POSITIONAL,
1982
+ inspect.Parameter.VAR_KEYWORD)
1983
+ )
1984
+ self._entity_method_cache[cache_key] = has_required_param
1985
+ if has_required_param or entity_input is not None:
1986
+ entity_output = method(entity_input)
1987
+ else:
1988
+ entity_output = method()
1952
1989
  else:
1953
1990
  # Execute the entity function
1954
1991
  entity_output = fn(ctx, entity_input)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: durabletask
3
- Version: 0.0.0.dev65
3
+ Version: 0.0.0.dev67
4
4
  Summary: A Durable Task Client SDK for Python
5
5
  License: MIT License
6
6
 
@@ -17,10 +17,12 @@ durabletask/entities/entity_context.py
17
17
  durabletask/entities/entity_instance_id.py
18
18
  durabletask/entities/entity_lock.py
19
19
  durabletask/entities/entity_metadata.py
20
+ durabletask/entities/entity_operation_failed_exception.py
20
21
  durabletask/internal/entity_state_shim.py
21
22
  durabletask/internal/exceptions.py
22
23
  durabletask/internal/grpc_interceptor.py
23
24
  durabletask/internal/helpers.py
25
+ durabletask/internal/json_encode_output_exception.py
24
26
  durabletask/internal/orchestration_entity_context.py
25
27
  durabletask/internal/orchestrator_service_pb2.py
26
28
  durabletask/internal/orchestrator_service_pb2.pyi
@@ -9,7 +9,7 @@ build-backend = "setuptools.build_meta"
9
9
 
10
10
  [project]
11
11
  name = "durabletask"
12
- version = "0.0.0.dev65"
12
+ version = "0.0.0.dev67"
13
13
  description = "A Durable Task Client SDK for Python"
14
14
  keywords = [
15
15
  "durable",
@@ -1,42 +0,0 @@
1
- class EntityInstanceId:
2
- def __init__(self, entity: str, key: str):
3
- self.entity = entity
4
- self.key = key
5
-
6
- def __str__(self) -> str:
7
- return f"@{self.entity}@{self.key}"
8
-
9
- def __eq__(self, other):
10
- if not isinstance(other, EntityInstanceId):
11
- return False
12
- return self.entity == other.entity and self.key == other.key
13
-
14
- def __lt__(self, other):
15
- if not isinstance(other, EntityInstanceId):
16
- return self < other
17
- return str(self) < str(other)
18
-
19
- @staticmethod
20
- def parse(entity_id: str) -> "EntityInstanceId":
21
- """Parse a string representation of an entity ID into an EntityInstanceId object.
22
-
23
- Parameters
24
- ----------
25
- entity_id : str
26
- The string representation of the entity ID, in the format '@entity@key'.
27
-
28
- Returns
29
- -------
30
- EntityInstanceId
31
- The parsed EntityInstanceId object.
32
-
33
- Raises
34
- ------
35
- ValueError
36
- If the input string is not in the correct format.
37
- """
38
- try:
39
- _, entity, key = entity_id.split("@", 2)
40
- return EntityInstanceId(entity=entity, key=key)
41
- except ValueError as ex:
42
- raise ValueError(f"Invalid entity ID format: {entity_id}", ex)