hatchet-sdk 1.19.0__py3-none-any.whl → 1.20.1__py3-none-any.whl
This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- hatchet_sdk/clients/dispatcher/action_listener.py +0 -1
- hatchet_sdk/clients/dispatcher/dispatcher.py +0 -30
- hatchet_sdk/clients/rest/__init__.py +11 -3
- hatchet_sdk/clients/rest/api/task_api.py +27 -0
- hatchet_sdk/clients/rest/api/tenant_api.py +345 -0
- hatchet_sdk/clients/rest/api/user_api.py +9 -0
- hatchet_sdk/clients/rest/api/webhook_api.py +323 -8
- hatchet_sdk/clients/rest/api/workflow_api.py +327 -0
- hatchet_sdk/clients/rest/api/workflow_runs_api.py +408 -0
- hatchet_sdk/clients/rest/configuration.py +8 -0
- hatchet_sdk/clients/rest/models/__init__.py +11 -3
- hatchet_sdk/clients/rest/models/create_tenant_request.py +19 -1
- hatchet_sdk/clients/rest/models/registered_workflow.py +86 -0
- hatchet_sdk/clients/rest/models/tenant.py +6 -0
- hatchet_sdk/clients/rest/models/tenant_environment.py +38 -0
- hatchet_sdk/clients/rest/models/update_cron_workflow_trigger_request.py +83 -0
- hatchet_sdk/clients/rest/models/update_tenant_member_request.py +85 -0
- hatchet_sdk/clients/rest/models/v1_filter.py +8 -1
- hatchet_sdk/clients/rest/models/v1_update_webhook_request.py +86 -0
- hatchet_sdk/clients/rest/models/v1_webhook_source_name.py +2 -0
- hatchet_sdk/clients/rest/models/worker.py +22 -0
- hatchet_sdk/features/runs.py +212 -0
- hatchet_sdk/hatchet.py +0 -20
- hatchet_sdk/opentelemetry/instrumentor.py +1 -27
- hatchet_sdk/runnables/action.py +2 -5
- hatchet_sdk/runnables/task.py +0 -1
- hatchet_sdk/utils/iterables.py +9 -0
- hatchet_sdk/utils/opentelemetry.py +0 -1
- hatchet_sdk/worker/action_listener_process.py +0 -29
- hatchet_sdk/worker/runner/runner.py +1 -105
- {hatchet_sdk-1.19.0.dist-info → hatchet_sdk-1.20.1.dist-info}/METADATA +1 -1
- {hatchet_sdk-1.19.0.dist-info → hatchet_sdk-1.20.1.dist-info}/RECORD +34 -28
- {hatchet_sdk-1.19.0.dist-info → hatchet_sdk-1.20.1.dist-info}/WHEEL +0 -0
- {hatchet_sdk-1.19.0.dist-info → hatchet_sdk-1.20.1.dist-info}/entry_points.txt +0 -0
hatchet_sdk/clients/rest/models/update_cron_workflow_trigger_request.py
ADDED

@@ -0,0 +1,83 @@
+# coding: utf-8
+
+"""
+    Hatchet API
+
+    The Hatchet API
+
+    The version of the OpenAPI document: 1.0.0
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+"""  # noqa: E501
+
+
+from __future__ import annotations
+
+import json
+import pprint
+import re  # noqa: F401
+from typing import Any, ClassVar, Dict, List, Optional, Set
+
+from pydantic import BaseModel, ConfigDict, StrictBool
+from typing_extensions import Self
+
+
+class UpdateCronWorkflowTriggerRequest(BaseModel):
+    """
+    UpdateCronWorkflowTriggerRequest
+    """  # noqa: E501
+
+    enabled: Optional[StrictBool] = None
+    __properties: ClassVar[List[str]] = ["enabled"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of UpdateCronWorkflowTriggerRequest from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of UpdateCronWorkflowTriggerRequest from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj)
+
+        _obj = cls.model_validate({"enabled": obj.get("enabled")})
+        return _obj
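For orientation, here is a short usage sketch (not part of the package) of the new model as defined above. Because `to_dict()` passes `exclude_none=True`, an unset `enabled` field is dropped from the serialized output entirely:

from hatchet_sdk.clients.rest.models.update_cron_workflow_trigger_request import (
    UpdateCronWorkflowTriggerRequest,
)

req = UpdateCronWorkflowTriggerRequest(enabled=False)
print(req.to_json())  # {"enabled": false}

# An unset optional field is omitted, since to_dict() uses exclude_none=True:
print(UpdateCronWorkflowTriggerRequest().to_json())  # {}

# Round-trip from JSON:
parsed = UpdateCronWorkflowTriggerRequest.from_json('{"enabled": true}')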
hatchet_sdk/clients/rest/models/update_tenant_member_request.py
ADDED

@@ -0,0 +1,85 @@
+# coding: utf-8
+
+"""
+    Hatchet API
+
+    The Hatchet API
+
+    The version of the OpenAPI document: 1.0.0
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+"""  # noqa: E501
+
+
+from __future__ import annotations
+
+import json
+import pprint
+import re  # noqa: F401
+from typing import Any, ClassVar, Dict, List, Optional, Set
+
+from pydantic import BaseModel, ConfigDict, Field
+from typing_extensions import Self
+
+from hatchet_sdk.clients.rest.models.tenant_member_role import TenantMemberRole
+
+
+class UpdateTenantMemberRequest(BaseModel):
+    """
+    UpdateTenantMemberRequest
+    """  # noqa: E501
+
+    role: TenantMemberRole = Field(description="The role of the user in the tenant.")
+    __properties: ClassVar[List[str]] = ["role"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of UpdateTenantMemberRequest from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of UpdateTenantMemberRequest from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj)
+
+        _obj = cls.model_validate({"role": obj.get("role")})
+        return _obj
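A hedged usage sketch of this model (not part of the package), assuming the `TenantMemberRole` enum exposes a value such as `ADMIN` (the enum's members are not shown in this diff):

from hatchet_sdk.clients.rest.models.tenant_member_role import TenantMemberRole
from hatchet_sdk.clients.rest.models.update_tenant_member_request import (
    UpdateTenantMemberRequest,
)

# `role` is required, so validation fails if it is omitted.
req = UpdateTenantMemberRequest(role=TenantMemberRole.ADMIN)  # ADMIN assumed to exist
print(req.to_json())  # {"role": "ADMIN"}

# from_dict validates the raw string against the enum:
parsed = UpdateTenantMemberRequest.from_dict({"role": "ADMIN"})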
hatchet_sdk/clients/rest/models/v1_filter.py
CHANGED

@@ -19,7 +19,7 @@ import pprint
 import re  # noqa: F401
 from typing import Any, ClassVar, Dict, List, Optional, Set
 
-from pydantic import BaseModel, ConfigDict, Field, StrictStr
+from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
 from typing_extensions import Annotated, Self
 
 from hatchet_sdk.clients.rest.models.api_resource_meta import APIResourceMeta
@@ -50,6 +50,11 @@ class V1Filter(BaseModel):
     payload: Dict[str, Any] = Field(
         description="Additional payload data associated with the filter"
     )
+    is_declarative: Optional[StrictBool] = Field(
+        default=None,
+        description="Whether the filter is declarative (true) or programmatic (false)",
+        alias="isDeclarative",
+    )
     __properties: ClassVar[List[str]] = [
         "metadata",
         "tenantId",
@@ -57,6 +62,7 @@ class V1Filter(BaseModel):
         "scope",
         "expression",
         "payload",
+        "isDeclarative",
     ]
 
     model_config = ConfigDict(
@@ -122,6 +128,7 @@ class V1Filter(BaseModel):
                 "scope": obj.get("scope"),
                 "expression": obj.get("expression"),
                 "payload": obj.get("payload"),
+                "isDeclarative": obj.get("isDeclarative"),
             }
         )
        return _obj
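The new field follows the generator's usual alias pattern: snake_case attribute in Python, camelCase name on the wire. A minimal, self-contained sketch of how `populate_by_name` plus `alias` behaves, using a stand-in model rather than the SDK's `V1Filter` (whose other required fields are omitted here):

from typing import Optional

from pydantic import BaseModel, ConfigDict, Field


class FilterSketch(BaseModel):
    # Mirrors the V1Filter pattern: camelCase alias on the wire,
    # snake_case attribute in Python code.
    model_config = ConfigDict(populate_by_name=True)

    is_declarative: Optional[bool] = Field(default=None, alias="isDeclarative")


print(FilterSketch(isDeclarative=True).is_declarative)  # True (populated via alias)
print(FilterSketch(is_declarative=True).model_dump(by_alias=True))  # {'isDeclarative': True}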
hatchet_sdk/clients/rest/models/v1_update_webhook_request.py
ADDED

@@ -0,0 +1,86 @@
+# coding: utf-8
+
+"""
+    Hatchet API
+
+    The Hatchet API
+
+    The version of the OpenAPI document: 1.0.0
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+"""  # noqa: E501
+
+
+from __future__ import annotations
+
+import json
+import pprint
+import re  # noqa: F401
+from typing import Any, ClassVar, Dict, List, Optional, Set
+
+from pydantic import BaseModel, ConfigDict, Field, StrictStr
+from typing_extensions import Self
+
+
+class V1UpdateWebhookRequest(BaseModel):
+    """
+    V1UpdateWebhookRequest
+    """  # noqa: E501
+
+    event_key_expression: StrictStr = Field(
+        description="The CEL expression to use for the event key. This is used to create the event key from the webhook payload.",
+        alias="eventKeyExpression",
+    )
+    __properties: ClassVar[List[str]] = ["eventKeyExpression"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of V1UpdateWebhookRequest from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of V1UpdateWebhookRequest from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj)
+
+        _obj = cls.model_validate({"eventKeyExpression": obj.get("eventKeyExpression")})
+        return _obj
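A short construction sketch (not part of the package); the CEL expression below is illustrative only, since the field accepts any string and the actual expressions depend on your webhook payloads:

from hatchet_sdk.clients.rest.models.v1_update_webhook_request import (
    V1UpdateWebhookRequest,
)

# Construct via the wire-format alias...
req = V1UpdateWebhookRequest(eventKeyExpression="input.event_key")

# ...or via the snake_case field name, thanks to populate_by_name=True:
req = V1UpdateWebhookRequest(event_key_expression="input.event_key")

print(req.to_json())  # {"eventKeyExpression": "input.event_key"}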
hatchet_sdk/clients/rest/models/worker.py
CHANGED

@@ -25,6 +25,7 @@ from typing_extensions import Annotated, Self
 
 from hatchet_sdk.clients.rest.models.api_resource_meta import APIResourceMeta
 from hatchet_sdk.clients.rest.models.recent_step_runs import RecentStepRuns
+from hatchet_sdk.clients.rest.models.registered_workflow import RegisteredWorkflow
 from hatchet_sdk.clients.rest.models.semaphore_slots import SemaphoreSlots
 from hatchet_sdk.clients.rest.models.worker_label import WorkerLabel
 from hatchet_sdk.clients.rest.models.worker_runtime_info import WorkerRuntimeInfo
@@ -52,6 +53,11 @@ class Worker(BaseModel):
     actions: Optional[List[StrictStr]] = Field(
         default=None, description="The actions this worker can perform."
     )
+    registered_workflows: Optional[List[RegisteredWorkflow]] = Field(
+        default=None,
+        description="The workflow ids registered on this worker.",
+        alias="registeredWorkflows",
+    )
     slots: Optional[List[SemaphoreSlots]] = Field(
         default=None, description="The semaphore slot state for the worker."
     )
@@ -97,6 +103,7 @@ class Worker(BaseModel):
         "lastHeartbeatAt",
         "lastListenerEstablished",
         "actions",
+        "registeredWorkflows",
         "slots",
         "recentStepRuns",
         "status",
@@ -161,6 +168,13 @@ class Worker(BaseModel):
         # override the default output from pydantic by calling `to_dict()` of metadata
         if self.metadata:
             _dict["metadata"] = self.metadata.to_dict()
+        # override the default output from pydantic by calling `to_dict()` of each item in registered_workflows (list)
+        _items = []
+        if self.registered_workflows:
+            for _item_registered_workflows in self.registered_workflows:
+                if _item_registered_workflows:
+                    _items.append(_item_registered_workflows.to_dict())
+            _dict["registeredWorkflows"] = _items
         # override the default output from pydantic by calling `to_dict()` of each item in slots (list)
         _items = []
         if self.slots:
@@ -208,6 +222,14 @@ class Worker(BaseModel):
                 "lastHeartbeatAt": obj.get("lastHeartbeatAt"),
                 "lastListenerEstablished": obj.get("lastListenerEstablished"),
                 "actions": obj.get("actions"),
+                "registeredWorkflows": (
+                    [
+                        RegisteredWorkflow.from_dict(_item)
+                        for _item in obj["registeredWorkflows"]
+                    ]
+                    if obj.get("registeredWorkflows") is not None
+                    else None
+                ),
                 "slots": (
                     [SemaphoreSlots.from_dict(_item) for _item in obj["slots"]]
                     if obj.get("slots") is not None
hatchet_sdk/features/runs.py
CHANGED
@@ -1,4 +1,5 @@
 import asyncio
+import time
 from collections.abc import AsyncIterator
 from datetime import datetime, timedelta, timezone
 from typing import TYPE_CHECKING, Literal, overload
@@ -32,6 +33,7 @@ from hatchet_sdk.clients.v1.api_client import (
 from hatchet_sdk.config import ClientConfig
 from hatchet_sdk.utils.aio import gather_max_concurrency
 from hatchet_sdk.utils.datetimes import partition_date_range
+from hatchet_sdk.utils.iterables import create_chunks
 from hatchet_sdk.utils.typing import JSONSerializableMapping
 
 if TYPE_CHECKING:
@@ -179,6 +181,216 @@ class RunsClient(BaseRestClient):
         """
         return await asyncio.to_thread(self.get_status, workflow_run_id)
 
+    def _perform_action_with_pagination(
+        self,
+        action: Literal["cancel", "replay"],
+        statuses: list[V1TaskStatus],
+        sleep_time: int = 3,
+        chunk_size: int = 500,
+        since: datetime | None = None,
+        until: datetime | None = None,
+        additional_metadata: dict[str, str] | None = None,
+        workflow_ids: list[str] | None = None,
+    ) -> None:
+        """
+        Perform a bulk action (cancel or replay) on runs matching the specified filters in chunks.
+
+        The motivation for this method is to provide an easy way to perform bulk operations by filters over a larger number of runs than
+        the API would normally be able to handle, with automatic pagination and chunking to help limit the pressure on the API.
+
+        This method first pulls the IDs of the runs from the API, and then feeds them back to the API in chunks.
+
+        :param action: The action to perform, either "cancel" or "replay".
+        :param statuses: The statuses to filter runs by.
+        :param sleep_time: The time to sleep between processing chunks, in seconds.
+        :param chunk_size: The maximum number of run IDs to process in each chunk.
+        :param since: The start time for filtering runs.
+        :param until: The end time for filtering runs.
+        :param additional_metadata: Additional metadata to filter runs by.
+        :param workflow_ids: The workflow IDs to filter runs by.
+        """
+        until = until or datetime.now(tz=timezone.utc)
+        since = since or (until - timedelta(days=1))
+
+        with self.client() as client:
+            external_ids = self._wra(client).v1_workflow_run_external_ids_list(
+                tenant=self.client_config.tenant_id,
+                since=since,
+                until=until,
+                additional_metadata=maybe_additional_metadata_to_kv(
+                    additional_metadata
+                ),
+                statuses=statuses,
+                workflow_ids=workflow_ids,
+            )
+
+        chunks = list(create_chunks(external_ids, chunk_size))
+        func = self.bulk_cancel if action == "cancel" else self.bulk_replay
+
+        for ix, chunk in enumerate(chunks):
+            self.client_config.logger.info(
+                f"processing chunk {ix + 1}/{len(chunks)} of {len(chunk)} ids"  # noqa: G004
+            )
+
+            opts = BulkCancelReplayOpts(ids=chunk)
+            func(opts=opts)
+
+            time.sleep(sleep_time)
+
+    def bulk_replay_by_filters_with_pagination(
+        self,
+        sleep_time: int = 3,
+        chunk_size: int = 500,
+        since: datetime | None = None,
+        until: datetime | None = None,
+        statuses: list[V1TaskStatus] | None = None,
+        additional_metadata: dict[str, str] | None = None,
+        workflow_ids: list[str] | None = None,
+    ) -> None:
+        """
+        Replay runs matching the specified filters in chunks.
+
+        The motivation for this method is to provide an easy way to perform bulk operations by filters over a larger number of runs than
+        the API would normally be able to handle, with automatic pagination and chunking to help limit the pressure on the API.
+
+        This method first pulls the IDs of the runs from the API, and then feeds them back to the API in chunks.
+
+        :param sleep_time: The time to sleep between processing chunks, in seconds.
+        :param chunk_size: The maximum number of run IDs to process in each chunk.
+        :param since: The start time for filtering runs.
+        :param until: The end time for filtering runs.
+        :param statuses: The statuses to filter runs by.
+        :param additional_metadata: Additional metadata to filter runs by.
+        :param workflow_ids: The workflow IDs to filter runs by.
+        """
+
+        self._perform_action_with_pagination(
+            since=since,
+            action="replay",
+            sleep_time=sleep_time,
+            chunk_size=chunk_size,
+            until=until,
+            statuses=statuses or [V1TaskStatus.FAILED, V1TaskStatus.CANCELLED],
+            additional_metadata=additional_metadata,
+            workflow_ids=workflow_ids,
+        )
+
+    def bulk_cancel_by_filters_with_pagination(
+        self,
+        sleep_time: int = 3,
+        chunk_size: int = 500,
+        since: datetime | None = None,
+        until: datetime | None = None,
+        statuses: list[V1TaskStatus] | None = None,
+        additional_metadata: dict[str, str] | None = None,
+        workflow_ids: list[str] | None = None,
+    ) -> None:
+        """
+        Cancel runs matching the specified filters in chunks.
+
+        The motivation for this method is to provide an easy way to perform bulk operations by filters over a larger number of runs than
+        the API would normally be able to handle, with automatic pagination and chunking to help limit the pressure on the API.
+
+        This method first pulls the IDs of the runs from the API, and then feeds them back to the API in chunks.
+
+        :param sleep_time: The time to sleep between processing chunks, in seconds.
+        :param chunk_size: The maximum number of run IDs to process in each chunk.
+        :param since: The start time for filtering runs.
+        :param until: The end time for filtering runs.
+        :param statuses: The statuses to filter runs by.
+        :param additional_metadata: Additional metadata to filter runs by.
+        :param workflow_ids: The workflow IDs to filter runs by.
+        """
+
+        self._perform_action_with_pagination(
+            since=since,
+            action="cancel",
+            sleep_time=sleep_time,
+            chunk_size=chunk_size,
+            until=until,
+            statuses=statuses or [V1TaskStatus.RUNNING, V1TaskStatus.QUEUED],
+            additional_metadata=additional_metadata,
+            workflow_ids=workflow_ids,
+        )
+
+    async def aio_bulk_replay_by_filters_with_pagination(
+        self,
+        sleep_time: int = 3,
+        chunk_size: int = 500,
+        since: datetime | None = None,
+        until: datetime | None = None,
+        statuses: list[V1TaskStatus] | None = None,
+        additional_metadata: dict[str, str] | None = None,
+        workflow_ids: list[str] | None = None,
+    ) -> None:
+        """
+        Replay runs matching the specified filters in chunks.
+
+        The motivation for this method is to provide an easy way to perform bulk operations by filters over a larger number of runs than
+        the API would normally be able to handle, with automatic pagination and chunking to help limit the pressure on the API.
+
+        This method first pulls the IDs of the runs from the API, and then feeds them back to the API in chunks.
+
+        :param sleep_time: The time to sleep between processing chunks, in seconds.
+        :param chunk_size: The maximum number of run IDs to process in each chunk.
+        :param since: The start time for filtering runs.
+        :param until: The end time for filtering runs.
+        :param statuses: The statuses to filter runs by.
+        :param additional_metadata: Additional metadata to filter runs by.
+        :param workflow_ids: The workflow IDs to filter runs by.
+        """
+
+        await asyncio.to_thread(
+            self._perform_action_with_pagination,
+            since=since,
+            action="replay",
+            sleep_time=sleep_time,
+            chunk_size=chunk_size,
+            until=until,
+            statuses=statuses or [V1TaskStatus.FAILED, V1TaskStatus.CANCELLED],
+            additional_metadata=additional_metadata,
+            workflow_ids=workflow_ids,
+        )
+
+    async def aio_bulk_cancel_by_filters_with_pagination(
+        self,
+        sleep_time: int = 3,
+        chunk_size: int = 500,
+        since: datetime | None = None,
+        until: datetime | None = None,
+        statuses: list[V1TaskStatus] | None = None,
+        additional_metadata: dict[str, str] | None = None,
+        workflow_ids: list[str] | None = None,
+    ) -> None:
+        """
+        Cancel runs matching the specified filters in chunks.
+
+        The motivation for this method is to provide an easy way to perform bulk operations by filters over a larger number of runs than
+        the API would normally be able to handle, with automatic pagination and chunking to help limit the pressure on the API.
+
+        This method first pulls the IDs of the runs from the API, and then feeds them back to the API in chunks.
+
+        :param sleep_time: The time to sleep between processing chunks, in seconds.
+        :param chunk_size: The maximum number of run IDs to process in each chunk.
+        :param since: The start time for filtering runs.
+        :param until: The end time for filtering runs.
+        :param statuses: The statuses to filter runs by.
+        :param additional_metadata: Additional metadata to filter runs by.
+        :param workflow_ids: The workflow IDs to filter runs by.
+        """
+
+        await asyncio.to_thread(
+            self._perform_action_with_pagination,
+            since=since,
+            action="cancel",
+            sleep_time=sleep_time,
+            chunk_size=chunk_size,
+            until=until,
+            statuses=statuses or [V1TaskStatus.RUNNING, V1TaskStatus.QUEUED],
+            additional_metadata=additional_metadata,
+            workflow_ids=workflow_ids,
+        )
+
     @retry
     def list_with_pagination(
         self,
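A hedged usage sketch of the new bulk helpers, assuming an initialized Hatchet client (with the usual HATCHET_CLIENT_TOKEN configuration) that exposes this RunsClient as hatchet.runs, as with the SDK's other feature clients, and that V1TaskStatus lives at the module path below:

from datetime import datetime, timedelta, timezone

from hatchet_sdk import Hatchet
from hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus

hatchet = Hatchet()

# Replay failed runs from the last six hours, 200 IDs per chunk,
# sleeping 5 seconds between chunks to limit pressure on the API.
hatchet.runs.bulk_replay_by_filters_with_pagination(
    since=datetime.now(tz=timezone.utc) - timedelta(hours=6),
    statuses=[V1TaskStatus.FAILED],
    chunk_size=200,
    sleep_time=5,
)

# The aio_* variants run the same logic in a thread, e.g.:
#   await hatchet.runs.aio_bulk_cancel_by_filters_with_pagination()
# which, per the defaults above, cancels RUNNING and QUEUED runs
# from the last day.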
hatchet_sdk/hatchet.py
CHANGED
@@ -2,7 +2,6 @@ import asyncio
 import logging
 from collections.abc import Callable
 from datetime import timedelta
-from functools import cached_property
 from typing import Any, Concatenate, ParamSpec, cast, overload
 
 from hatchet_sdk import Context, DurableContext
@@ -10,7 +9,6 @@ from hatchet_sdk.client import Client
 from hatchet_sdk.clients.dispatcher.dispatcher import DispatcherClient
 from hatchet_sdk.clients.events import EventClient
 from hatchet_sdk.clients.listeners.run_event_listener import RunEventListenerClient
-from hatchet_sdk.clients.rest.models.tenant_version import TenantVersion
 from hatchet_sdk.config import ClientConfig
 from hatchet_sdk.features.cel import CELClient
 from hatchet_sdk.features.cron import CronClient
@@ -65,11 +63,6 @@ class Hatchet:
             client if client else Client(config=config or ClientConfig(), debug=debug)
         )
 
-        if self.tenant_engine_version != TenantVersion.V1:
-            logger.warning(
-                "🚨⚠️‼️ YOU ARE USING A V0 ENGINE WITH A V1 SDK, WHICH IS NOT SUPPORTED. PLEASE UPGRADE YOUR ENGINE TO V1.🚨⚠️‼️"
-            )
-
     @property
     def cel(self) -> CELClient:
         """
@@ -179,19 +172,6 @@ class Hatchet:
         """
         return self._client.config.namespace
 
-    @cached_property
-    def tenant_engine_version(self) -> TenantVersion:
-        """
-        Get the version of the Hatchet engine running in your tenant.
-        """
-        try:
-            return self._client.tenant.get().version
-        except Exception:
-            ## Nothing we can do here - if this fails, it's probably
-            ## because they don't have this endpoint yet, so we need to just
-            ## assume V1 to swallow the warning.
-            return TenantVersion.V1
-
     def worker(
         self,
         name: str,
hatchet_sdk/opentelemetry/instrumentor.py
CHANGED

@@ -216,11 +216,7 @@ class HatchetInstrumentor(BaseInstrumentor):  # type: ignore[misc]
             "worker.runner.runner.Runner.handle_start_step_run",
             self._wrap_handle_start_step_run,
         )
-        wrap_function_wrapper(
-            hatchet_sdk,
-            "worker.runner.runner.Runner.handle_start_group_key_run",
-            self._wrap_handle_get_group_key_run,
-        )
+
         wrap_function_wrapper(
             hatchet_sdk,
             "worker.runner.runner.Runner.handle_cancel_action",
@@ -310,27 +306,6 @@ class HatchetInstrumentor(BaseInstrumentor):  # type: ignore[misc]
 
             return result
 
-    ## IMPORTANT: Keep these types in sync with the wrapped method's signature
-    async def _wrap_handle_get_group_key_run(
-        self,
-        wrapped: Callable[[Action], Coroutine[None, None, Exception | None]],
-        instance: Runner,
-        args: tuple[Action],
-        kwargs: Any,
-    ) -> Exception | None:
-        action = args[0]
-
-        with self._tracer.start_as_current_span(
-            "hatchet.get_group_key_run",
-            attributes=action.get_otel_attributes(self.config),
-        ) as span:
-            result = await wrapped(*args, **kwargs)
-
-            if isinstance(result, Exception):
-                span.set_status(StatusCode.ERROR, str(result))
-
-            return result
-
     ## IMPORTANT: Keep these types in sync with the wrapped method's signature
     async def _wrap_handle_cancel_action(
         self,
@@ -719,7 +694,6 @@ class HatchetInstrumentor(BaseInstrumentor):  # type: ignore[misc]
         self.meter_provider = NoOpMeterProvider()
 
         unwrap(hatchet_sdk, "worker.runner.runner.Runner.handle_start_step_run")
-        unwrap(hatchet_sdk, "worker.runner.runner.Runner.handle_start_group_key_run")
         unwrap(hatchet_sdk, "worker.runner.runner.Runner.handle_cancel_action")
         unwrap(hatchet_sdk, "clients.events.EventClient.push")
         unwrap(hatchet_sdk, "clients.events.EventClient.bulk_push")