hatchet-sdk 1.20.0__py3-none-any.whl → 1.20.2__py3-none-any.whl

This diff shows the contents of publicly available package versions as published to their respective public registries. It is provided for informational purposes only and reflects the changes between the two releases.

This version of hatchet-sdk has been flagged as potentially problematic.

@@ -0,0 +1,83 @@
+ # coding: utf-8
+
+ """
+     Hatchet API
+
+     The Hatchet API
+
+     The version of the OpenAPI document: 1.0.0
+     Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+     Do not edit the class manually.
+ """  # noqa: E501
+
+
+ from __future__ import annotations
+
+ import json
+ import pprint
+ import re  # noqa: F401
+ from typing import Any, ClassVar, Dict, List, Optional, Set
+
+ from pydantic import BaseModel, ConfigDict, StrictBool
+ from typing_extensions import Self
+
+
+ class UpdateCronWorkflowTriggerRequest(BaseModel):
+     """
+     UpdateCronWorkflowTriggerRequest
+     """  # noqa: E501
+
+     enabled: Optional[StrictBool] = None
+     __properties: ClassVar[List[str]] = ["enabled"]
+
+     model_config = ConfigDict(
+         populate_by_name=True,
+         validate_assignment=True,
+         protected_namespaces=(),
+     )
+
+     def to_str(self) -> str:
+         """Returns the string representation of the model using alias"""
+         return pprint.pformat(self.model_dump(by_alias=True))
+
+     def to_json(self) -> str:
+         """Returns the JSON representation of the model using alias"""
+         # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+         return json.dumps(self.to_dict())
+
+     @classmethod
+     def from_json(cls, json_str: str) -> Optional[Self]:
+         """Create an instance of UpdateCronWorkflowTriggerRequest from a JSON string"""
+         return cls.from_dict(json.loads(json_str))
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Return the dictionary representation of the model using alias.
+
+         This has the following differences from calling pydantic's
+         `self.model_dump(by_alias=True)`:
+
+         * `None` is only added to the output dict for nullable fields that
+           were set at model initialization. Other fields with value `None`
+           are ignored.
+         """
+         excluded_fields: Set[str] = set([])
+
+         _dict = self.model_dump(
+             by_alias=True,
+             exclude=excluded_fields,
+             exclude_none=True,
+         )
+         return _dict
+
+     @classmethod
+     def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+         """Create an instance of UpdateCronWorkflowTriggerRequest from a dict"""
+         if obj is None:
+             return None
+
+         if not isinstance(obj, dict):
+             return cls.model_validate(obj)
+
+         _obj = cls.model_validate({"enabled": obj.get("enabled")})
+         return _obj
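
For orientation, a minimal usage sketch of the model added above (not part of the diff; the import path is an assumption based on the sibling models in this release):

# Hypothetical sketch: round-tripping the generated pydantic model.
from hatchet_sdk.clients.rest.models.update_cron_workflow_trigger_request import (
    UpdateCronWorkflowTriggerRequest,
)

req = UpdateCronWorkflowTriggerRequest(enabled=False)
assert req.to_json() == '{"enabled": false}'
restored = UpdateCronWorkflowTriggerRequest.from_json(req.to_json())
assert restored is not None and restored.enabled is False
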
@@ -0,0 +1,85 @@
+ # coding: utf-8
+
+ """
+     Hatchet API
+
+     The Hatchet API
+
+     The version of the OpenAPI document: 1.0.0
+     Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+     Do not edit the class manually.
+ """  # noqa: E501
+
+
+ from __future__ import annotations
+
+ import json
+ import pprint
+ import re  # noqa: F401
+ from typing import Any, ClassVar, Dict, List, Optional, Set
+
+ from pydantic import BaseModel, ConfigDict, Field
+ from typing_extensions import Self
+
+ from hatchet_sdk.clients.rest.models.tenant_member_role import TenantMemberRole
+
+
+ class UpdateTenantMemberRequest(BaseModel):
+     """
+     UpdateTenantMemberRequest
+     """  # noqa: E501
+
+     role: TenantMemberRole = Field(description="The role of the user in the tenant.")
+     __properties: ClassVar[List[str]] = ["role"]
+
+     model_config = ConfigDict(
+         populate_by_name=True,
+         validate_assignment=True,
+         protected_namespaces=(),
+     )
+
+     def to_str(self) -> str:
+         """Returns the string representation of the model using alias"""
+         return pprint.pformat(self.model_dump(by_alias=True))
+
+     def to_json(self) -> str:
+         """Returns the JSON representation of the model using alias"""
+         # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+         return json.dumps(self.to_dict())
+
+     @classmethod
+     def from_json(cls, json_str: str) -> Optional[Self]:
+         """Create an instance of UpdateTenantMemberRequest from a JSON string"""
+         return cls.from_dict(json.loads(json_str))
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Return the dictionary representation of the model using alias.
+
+         This has the following differences from calling pydantic's
+         `self.model_dump(by_alias=True)`:
+
+         * `None` is only added to the output dict for nullable fields that
+           were set at model initialization. Other fields with value `None`
+           are ignored.
+         """
+         excluded_fields: Set[str] = set([])
+
+         _dict = self.model_dump(
+             by_alias=True,
+             exclude=excluded_fields,
+             exclude_none=True,
+         )
+         return _dict
+
+     @classmethod
+     def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+         """Create an instance of UpdateTenantMemberRequest from a dict"""
+         if obj is None:
+             return None
+
+         if not isinstance(obj, dict):
+             return cls.model_validate(obj)
+
+         _obj = cls.model_validate({"role": obj.get("role")})
+         return _obj
@@ -19,7 +19,7 @@ import pprint
  import re  # noqa: F401
  from typing import Any, ClassVar, Dict, List, Optional, Set

- from pydantic import BaseModel, ConfigDict, Field, StrictStr
+ from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
  from typing_extensions import Annotated, Self

  from hatchet_sdk.clients.rest.models.api_resource_meta import APIResourceMeta
@@ -50,6 +50,11 @@ class V1Filter(BaseModel):
      payload: Dict[str, Any] = Field(
          description="Additional payload data associated with the filter"
      )
+     is_declarative: Optional[StrictBool] = Field(
+         default=None,
+         description="Whether the filter is declarative (true) or programmatic (false)",
+         alias="isDeclarative",
+     )
      __properties: ClassVar[List[str]] = [
          "metadata",
          "tenantId",
@@ -57,6 +62,7 @@ class V1Filter(BaseModel):
          "scope",
          "expression",
          "payload",
+         "isDeclarative",
      ]

      model_config = ConfigDict(
@@ -122,6 +128,7 @@ class V1Filter(BaseModel):
                  "scope": obj.get("scope"),
                  "expression": obj.get("expression"),
                  "payload": obj.get("payload"),
+                 "isDeclarative": obj.get("isDeclarative"),
              }
          )
          return _obj
@@ -0,0 +1,86 @@
+ # coding: utf-8
+
+ """
+     Hatchet API
+
+     The Hatchet API
+
+     The version of the OpenAPI document: 1.0.0
+     Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+     Do not edit the class manually.
+ """  # noqa: E501
+
+
+ from __future__ import annotations
+
+ import json
+ import pprint
+ import re  # noqa: F401
+ from typing import Any, ClassVar, Dict, List, Optional, Set
+
+ from pydantic import BaseModel, ConfigDict, Field, StrictStr
+ from typing_extensions import Self
+
+
+ class V1UpdateWebhookRequest(BaseModel):
+     """
+     V1UpdateWebhookRequest
+     """  # noqa: E501
+
+     event_key_expression: StrictStr = Field(
+         description="The CEL expression to use for the event key. This is used to create the event key from the webhook payload.",
+         alias="eventKeyExpression",
+     )
+     __properties: ClassVar[List[str]] = ["eventKeyExpression"]
+
+     model_config = ConfigDict(
+         populate_by_name=True,
+         validate_assignment=True,
+         protected_namespaces=(),
+     )
+
+     def to_str(self) -> str:
+         """Returns the string representation of the model using alias"""
+         return pprint.pformat(self.model_dump(by_alias=True))
+
+     def to_json(self) -> str:
+         """Returns the JSON representation of the model using alias"""
+         # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+         return json.dumps(self.to_dict())
+
+     @classmethod
+     def from_json(cls, json_str: str) -> Optional[Self]:
+         """Create an instance of V1UpdateWebhookRequest from a JSON string"""
+         return cls.from_dict(json.loads(json_str))
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Return the dictionary representation of the model using alias.
+
+         This has the following differences from calling pydantic's
+         `self.model_dump(by_alias=True)`:
+
+         * `None` is only added to the output dict for nullable fields that
+           were set at model initialization. Other fields with value `None`
+           are ignored.
+         """
+         excluded_fields: Set[str] = set([])
+
+         _dict = self.model_dump(
+             by_alias=True,
+             exclude=excluded_fields,
+             exclude_none=True,
+         )
+         return _dict
+
+     @classmethod
+     def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+         """Create an instance of V1UpdateWebhookRequest from a dict"""
+         if obj is None:
+             return None
+
+         if not isinstance(obj, dict):
+             return cls.model_validate(obj)
+
+         _obj = cls.model_validate({"eventKeyExpression": obj.get("eventKeyExpression")})
+         return _obj
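
A brief sketch of how this request model might be used (not part of the diff; the import path and the CEL expression are illustrative assumptions):

# Hypothetical sketch: the field can be populated via its camelCase alias, and
# to_dict() serializes back out using that alias.
from hatchet_sdk.clients.rest.models.v1_update_webhook_request import (
    V1UpdateWebhookRequest,
)

req = V1UpdateWebhookRequest(eventKeyExpression="input.event_type")
assert req.to_dict() == {"eventKeyExpression": "input.event_type"}
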
@@ -31,6 +31,8 @@ class V1WebhookSourceName(str, Enum):
      GENERIC = "GENERIC"
      GITHUB = "GITHUB"
      STRIPE = "STRIPE"
+     SLACK = "SLACK"
+     LINEAR = "LINEAR"

      @classmethod
      def from_json(cls, json_str: str) -> Self:
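
A quick sketch of the new enum members (not from the diff; the import path is assumed from the package layout):

# Hypothetical sketch: the new webhook sources parse from their JSON string values.
from hatchet_sdk.clients.rest.models.v1_webhook_source_name import V1WebhookSourceName

assert V1WebhookSourceName("SLACK") is V1WebhookSourceName.SLACK
assert V1WebhookSourceName.from_json('"LINEAR"') is V1WebhookSourceName.LINEAR
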
@@ -25,6 +25,7 @@ from typing_extensions import Annotated, Self

  from hatchet_sdk.clients.rest.models.api_resource_meta import APIResourceMeta
  from hatchet_sdk.clients.rest.models.recent_step_runs import RecentStepRuns
+ from hatchet_sdk.clients.rest.models.registered_workflow import RegisteredWorkflow
  from hatchet_sdk.clients.rest.models.semaphore_slots import SemaphoreSlots
  from hatchet_sdk.clients.rest.models.worker_label import WorkerLabel
  from hatchet_sdk.clients.rest.models.worker_runtime_info import WorkerRuntimeInfo
@@ -52,6 +53,11 @@ class Worker(BaseModel):
      actions: Optional[List[StrictStr]] = Field(
          default=None, description="The actions this worker can perform."
      )
+     registered_workflows: Optional[List[RegisteredWorkflow]] = Field(
+         default=None,
+         description="The workflow ids registered on this worker.",
+         alias="registeredWorkflows",
+     )
      slots: Optional[List[SemaphoreSlots]] = Field(
          default=None, description="The semaphore slot state for the worker."
      )
@@ -97,6 +103,7 @@ class Worker(BaseModel):
          "lastHeartbeatAt",
          "lastListenerEstablished",
          "actions",
+         "registeredWorkflows",
          "slots",
          "recentStepRuns",
          "status",
@@ -161,6 +168,13 @@ class Worker(BaseModel):
          # override the default output from pydantic by calling `to_dict()` of metadata
          if self.metadata:
              _dict["metadata"] = self.metadata.to_dict()
+         # override the default output from pydantic by calling `to_dict()` of each item in registered_workflows (list)
+         _items = []
+         if self.registered_workflows:
+             for _item_registered_workflows in self.registered_workflows:
+                 if _item_registered_workflows:
+                     _items.append(_item_registered_workflows.to_dict())
+             _dict["registeredWorkflows"] = _items
          # override the default output from pydantic by calling `to_dict()` of each item in slots (list)
          _items = []
          if self.slots:
@@ -208,6 +222,14 @@ class Worker(BaseModel):
                  "lastHeartbeatAt": obj.get("lastHeartbeatAt"),
                  "lastListenerEstablished": obj.get("lastListenerEstablished"),
                  "actions": obj.get("actions"),
+                 "registeredWorkflows": (
+                     [
+                         RegisteredWorkflow.from_dict(_item)
+                         for _item in obj["registeredWorkflows"]
+                     ]
+                     if obj.get("registeredWorkflows") is not None
+                     else None
+                 ),
                  "slots": (
                      [SemaphoreSlots.from_dict(_item) for _item in obj["slots"]]
                      if obj.get("slots") is not None
@@ -1,4 +1,5 @@
  import asyncio
+ import time
  from collections.abc import AsyncIterator
  from datetime import datetime, timedelta, timezone
  from typing import TYPE_CHECKING, Literal, overload
@@ -32,6 +33,7 @@ from hatchet_sdk.clients.v1.api_client import (
  from hatchet_sdk.config import ClientConfig
  from hatchet_sdk.utils.aio import gather_max_concurrency
  from hatchet_sdk.utils.datetimes import partition_date_range
+ from hatchet_sdk.utils.iterables import create_chunks
  from hatchet_sdk.utils.typing import JSONSerializableMapping

  if TYPE_CHECKING:
@@ -179,6 +181,216 @@ class RunsClient(BaseRestClient):
          """
          return await asyncio.to_thread(self.get_status, workflow_run_id)

+     def _perform_action_with_pagination(
+         self,
+         action: Literal["cancel", "replay"],
+         statuses: list[V1TaskStatus],
+         sleep_time: int = 3,
+         chunk_size: int = 500,
+         since: datetime | None = None,
+         until: datetime | None = None,
+         additional_metadata: dict[str, str] | None = None,
+         workflow_ids: list[str] | None = None,
+     ) -> None:
+         """
+         Perform a bulk action (cancel or replay) on runs matching the specified filters in chunks.
+
+         The motivation for this method is to provide an easy way to perform bulk operations by filters over a larger number of runs than
+         the API would normally be able to handle, with automatic pagination and chunking to help limit the pressure on the API.
+
+         This method first pulls the IDs of the runs from the API, and then feeds them back to the API in chunks.
+
+         :param action: The action to perform, either "cancel" or "replay".
+         :param statuses: The statuses to filter runs by.
+         :param sleep_time: The time to sleep between processing chunks, in seconds.
+         :param chunk_size: The maximum number of run IDs to process in each chunk.
+         :param since: The start time for filtering runs.
+         :param until: The end time for filtering runs.
+         :param additional_metadata: Additional metadata to filter runs by.
+         :param workflow_ids: The workflow IDs to filter runs by.
+         """
+         until = until or datetime.now(tz=timezone.utc)
+         since = since or (until - timedelta(days=1))
+
+         with self.client() as client:
+             external_ids = self._wra(client).v1_workflow_run_external_ids_list(
+                 tenant=self.client_config.tenant_id,
+                 since=since,
+                 until=until,
+                 additional_metadata=maybe_additional_metadata_to_kv(
+                     additional_metadata
+                 ),
+                 statuses=statuses,
+                 workflow_ids=workflow_ids,
+             )
+
+         chunks = list(create_chunks(external_ids, chunk_size))
+         func = self.bulk_cancel if action == "cancel" else self.bulk_replay
+
+         for ix, chunk in enumerate(chunks):
+             self.client_config.logger.info(
+                 f"processing chunk {ix + 1}/{len(chunks)} of {len(chunk)} ids"  # noqa: G004
+             )
+
+             opts = BulkCancelReplayOpts(ids=chunk)
+             func(opts=opts)
+
+             time.sleep(sleep_time)
+
+     def bulk_replay_by_filters_with_pagination(
+         self,
+         sleep_time: int = 3,
+         chunk_size: int = 500,
+         since: datetime | None = None,
+         until: datetime | None = None,
+         statuses: list[V1TaskStatus] | None = None,
+         additional_metadata: dict[str, str] | None = None,
+         workflow_ids: list[str] | None = None,
+     ) -> None:
+         """
+         Replay runs matching the specified filters in chunks.
+
+         The motivation for this method is to provide an easy way to perform bulk operations by filters over a larger number of runs than
+         the API would normally be able to handle, with automatic pagination and chunking to help limit the pressure on the API.
+
+         This method first pulls the IDs of the runs from the API, and then feeds them back to the API in chunks.
+
+         :param sleep_time: The time to sleep between processing chunks, in seconds.
+         :param chunk_size: The maximum number of run IDs to process in each chunk.
+         :param since: The start time for filtering runs.
+         :param until: The end time for filtering runs.
+         :param statuses: The statuses to filter runs by.
+         :param additional_metadata: Additional metadata to filter runs by.
+         :param workflow_ids: The workflow IDs to filter runs by.
+         """
+
+         self._perform_action_with_pagination(
+             since=since,
+             action="replay",
+             sleep_time=sleep_time,
+             chunk_size=chunk_size,
+             until=until,
+             statuses=statuses or [V1TaskStatus.FAILED, V1TaskStatus.CANCELLED],
+             additional_metadata=additional_metadata,
+             workflow_ids=workflow_ids,
+         )
+
+     def bulk_cancel_by_filters_with_pagination(
+         self,
+         sleep_time: int = 3,
+         chunk_size: int = 500,
+         since: datetime | None = None,
+         until: datetime | None = None,
+         statuses: list[V1TaskStatus] | None = None,
+         additional_metadata: dict[str, str] | None = None,
+         workflow_ids: list[str] | None = None,
+     ) -> None:
+         """
+         Cancel runs matching the specified filters in chunks.
+
+         The motivation for this method is to provide an easy way to perform bulk operations by filters over a larger number of runs than
+         the API would normally be able to handle, with automatic pagination and chunking to help limit the pressure on the API.
+
+         This method first pulls the IDs of the runs from the API, and then feeds them back to the API in chunks.
+
+         :param sleep_time: The time to sleep between processing chunks, in seconds.
+         :param chunk_size: The maximum number of run IDs to process in each chunk.
+         :param since: The start time for filtering runs.
+         :param until: The end time for filtering runs.
+         :param statuses: The statuses to filter runs by.
+         :param additional_metadata: Additional metadata to filter runs by.
+         :param workflow_ids: The workflow IDs to filter runs by.
+         """
+
+         self._perform_action_with_pagination(
+             since=since,
+             action="cancel",
+             sleep_time=sleep_time,
+             chunk_size=chunk_size,
+             until=until,
+             statuses=statuses or [V1TaskStatus.RUNNING, V1TaskStatus.QUEUED],
+             additional_metadata=additional_metadata,
+             workflow_ids=workflow_ids,
+         )
+
+     async def aio_bulk_replay_by_filters_with_pagination(
+         self,
+         sleep_time: int = 3,
+         chunk_size: int = 500,
+         since: datetime | None = None,
+         until: datetime | None = None,
+         statuses: list[V1TaskStatus] | None = None,
+         additional_metadata: dict[str, str] | None = None,
+         workflow_ids: list[str] | None = None,
+     ) -> None:
+         """
+         Replay runs matching the specified filters in chunks.
+
+         The motivation for this method is to provide an easy way to perform bulk operations by filters over a larger number of runs than
+         the API would normally be able to handle, with automatic pagination and chunking to help limit the pressure on the API.
+
+         This method first pulls the IDs of the runs from the API, and then feeds them back to the API in chunks.
+
+         :param sleep_time: The time to sleep between processing chunks, in seconds.
+         :param chunk_size: The maximum number of run IDs to process in each chunk.
+         :param since: The start time for filtering runs.
+         :param until: The end time for filtering runs.
+         :param statuses: The statuses to filter runs by.
+         :param additional_metadata: Additional metadata to filter runs by.
+         :param workflow_ids: The workflow IDs to filter runs by.
+         """
+
+         await asyncio.to_thread(
+             self._perform_action_with_pagination,
+             since=since,
+             action="replay",
+             sleep_time=sleep_time,
+             chunk_size=chunk_size,
+             until=until,
+             statuses=statuses or [V1TaskStatus.FAILED, V1TaskStatus.CANCELLED],
+             additional_metadata=additional_metadata,
+             workflow_ids=workflow_ids,
+         )
+
+     async def aio_bulk_cancel_by_filters_with_pagination(
+         self,
+         sleep_time: int = 3,
+         chunk_size: int = 500,
+         since: datetime | None = None,
+         until: datetime | None = None,
+         statuses: list[V1TaskStatus] | None = None,
+         additional_metadata: dict[str, str] | None = None,
+         workflow_ids: list[str] | None = None,
+     ) -> None:
+         """
+         Cancel runs matching the specified filters in chunks.
+
+         The motivation for this method is to provide an easy way to perform bulk operations by filters over a larger number of runs than
+         the API would normally be able to handle, with automatic pagination and chunking to help limit the pressure on the API.
+
+         This method first pulls the IDs of the runs from the API, and then feeds them back to the API in chunks.
+
+         :param sleep_time: The time to sleep between processing chunks, in seconds.
+         :param chunk_size: The maximum number of run IDs to process in each chunk.
+         :param since: The start time for filtering runs.
+         :param until: The end time for filtering runs.
+         :param statuses: The statuses to filter runs by.
+         :param additional_metadata: Additional metadata to filter runs by.
+         :param workflow_ids: The workflow IDs to filter runs by.
+         """
+
+         await asyncio.to_thread(
+             self._perform_action_with_pagination,
+             since=since,
+             action="cancel",
+             sleep_time=sleep_time,
+             chunk_size=chunk_size,
+             until=until,
+             statuses=statuses or [V1TaskStatus.RUNNING, V1TaskStatus.QUEUED],
+             additional_metadata=additional_metadata,
+             workflow_ids=workflow_ids,
+         )
+
      @retry
      def list_with_pagination(
          self,
@@ -193,6 +405,7 @@ class RunsClient(BaseRestClient):
          worker_id: str | None = None,
          parent_task_external_id: str | None = None,
          triggering_event_external_id: str | None = None,
+         include_payloads: bool = True,
      ) -> list[V1TaskSummary]:
          """
          List task runs according to a set of filters, paginating through days
@@ -208,6 +421,7 @@ class RunsClient(BaseRestClient):
          :param worker_id: The worker ID to filter task runs by.
          :param parent_task_external_id: The parent task external ID to filter task runs by.
          :param triggering_event_external_id: The event id that triggered the task run.
+         :param include_payloads: Whether to include payloads in the response.

          :return: A list of task runs matching the specified filters.
          """
@@ -234,6 +448,7 @@ class RunsClient(BaseRestClient):
                  worker_id=worker_id,
                  parent_task_external_id=parent_task_external_id,
                  triggering_event_external_id=triggering_event_external_id,
+                 include_payloads=include_payloads,
              )
              for s, u in date_ranges
          ]
@@ -263,6 +478,7 @@ class RunsClient(BaseRestClient):
          worker_id: str | None = None,
          parent_task_external_id: str | None = None,
          triggering_event_external_id: str | None = None,
+         include_payloads: bool = True,
      ) -> list[V1TaskSummary]:
          """
          List task runs according to a set of filters, paginating through days
@@ -278,6 +494,7 @@ class RunsClient(BaseRestClient):
          :param worker_id: The worker ID to filter task runs by.
          :param parent_task_external_id: The parent task external ID to filter task runs by.
          :param triggering_event_external_id: The event id that triggered the task run.
+         :param include_payloads: Whether to include payloads in the response.

          :return: A list of task runs matching the specified filters.
          """
@@ -305,6 +522,7 @@ class RunsClient(BaseRestClient):
                  worker_id=worker_id,
                  parent_task_external_id=parent_task_external_id,
                  triggering_event_external_id=triggering_event_external_id,
+                 include_payloads=include_payloads,
              )
              for s, u in date_ranges
          ]
@@ -338,6 +556,7 @@ class RunsClient(BaseRestClient):
          worker_id: str | None = None,
          parent_task_external_id: str | None = None,
          triggering_event_external_id: str | None = None,
+         include_payloads: bool = True,
      ) -> V1TaskSummaryList:
          """
          List task runs according to a set of filters.
@@ -353,6 +572,7 @@ class RunsClient(BaseRestClient):
          :param worker_id: The worker ID to filter task runs by.
          :param parent_task_external_id: The parent task external ID to filter task runs by.
          :param triggering_event_external_id: The event id that triggered the task run.
+         :param include_payloads: Whether to include payloads in the response.

          :return: A list of task runs matching the specified filters.
          """
@@ -369,6 +589,7 @@ class RunsClient(BaseRestClient):
              worker_id=worker_id,
              parent_task_external_id=parent_task_external_id,
              triggering_event_external_id=triggering_event_external_id,
+             include_payloads=include_payloads,
          )

      @retry
@@ -385,6 +606,7 @@ class RunsClient(BaseRestClient):
          worker_id: str | None = None,
          parent_task_external_id: str | None = None,
          triggering_event_external_id: str | None = None,
+         include_payloads: bool = True,
      ) -> V1TaskSummaryList:
          """
          List task runs according to a set of filters.
@@ -400,6 +622,7 @@ class RunsClient(BaseRestClient):
          :param worker_id: The worker ID to filter task runs by.
          :param parent_task_external_id: The parent task external ID to filter task runs by.
          :param triggering_event_external_id: The event id that triggered the task run.
+         :param include_payloads: Whether to include payloads in the response.

          :return: A list of task runs matching the specified filters.
          """
@@ -431,6 +654,7 @@ class RunsClient(BaseRestClient):
                  worker_id=worker_id,
                  parent_task_external_id=parent_task_external_id,
                  triggering_event_external_id=triggering_event_external_id,
+                 include_payloads=include_payloads,
              )

      def create(
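
A hedged usage sketch of the new bulk helpers on the runs client (not part of the diff; the Hatchet() entry point and the runs attribute are assumptions based on the SDK's usual layout, and the client still needs its normal token/config from the environment):

# Hypothetical sketch: replay failed/cancelled runs from the last six hours in
# chunks of 200 ids, sleeping 5 seconds between chunks to spare the API.
from datetime import datetime, timedelta, timezone

from hatchet_sdk import Hatchet

hatchet = Hatchet()

hatchet.runs.bulk_replay_by_filters_with_pagination(
    since=datetime.now(tz=timezone.utc) - timedelta(hours=6),
    chunk_size=200,
    sleep_time=5,
)

# Cancel whatever is still RUNNING/QUEUED for one workflow (the id is a placeholder):
hatchet.runs.bulk_cancel_by_filters_with_pagination(workflow_ids=["<workflow-id>"])
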
@@ -0,0 +1,9 @@
+ from collections.abc import Generator
+ from typing import TypeVar
+
+ T = TypeVar("T")
+
+
+ def create_chunks(xs: list[T], n: int) -> Generator[list[T], None, None]:
+     for i in range(0, len(xs), n):
+         yield xs[i : i + n]
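
And a small sketch of the new chunking helper's behaviour, which follows directly from the implementation above:

from hatchet_sdk.utils.iterables import create_chunks

ids = ["a", "b", "c", "d", "e"]
# Slices the list into fixed-size pieces; the last chunk may be shorter.
assert list(create_chunks(ids, 2)) == [["a", "b"], ["c", "d"], ["e"]]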