vellum-ai 1.1.2__py3-none-any.whl → 1.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. vellum/__init__.py +16 -0
  2. vellum/client/README.md +55 -0
  3. vellum/client/__init__.py +66 -507
  4. vellum/client/core/client_wrapper.py +2 -2
  5. vellum/client/raw_client.py +844 -0
  6. vellum/client/reference.md +692 -19
  7. vellum/client/resources/ad_hoc/client.py +23 -180
  8. vellum/client/resources/ad_hoc/raw_client.py +276 -0
  9. vellum/client/resources/container_images/client.py +10 -36
  10. vellum/client/resources/deployments/client.py +16 -62
  11. vellum/client/resources/document_indexes/client.py +16 -72
  12. vellum/client/resources/documents/client.py +8 -30
  13. vellum/client/resources/folder_entities/client.py +4 -8
  14. vellum/client/resources/metric_definitions/client.py +4 -14
  15. vellum/client/resources/ml_models/client.py +2 -8
  16. vellum/client/resources/organizations/client.py +2 -6
  17. vellum/client/resources/prompts/client.py +2 -10
  18. vellum/client/resources/sandboxes/client.py +4 -20
  19. vellum/client/resources/test_suite_runs/client.py +4 -18
  20. vellum/client/resources/test_suites/client.py +11 -86
  21. vellum/client/resources/test_suites/raw_client.py +136 -0
  22. vellum/client/resources/workflow_deployments/client.py +20 -78
  23. vellum/client/resources/workflow_executions/client.py +2 -6
  24. vellum/client/resources/workflow_sandboxes/client.py +2 -10
  25. vellum/client/resources/workflows/client.py +7 -6
  26. vellum/client/resources/workflows/raw_client.py +58 -47
  27. vellum/client/resources/workspace_secrets/client.py +4 -20
  28. vellum/client/resources/workspaces/client.py +2 -6
  29. vellum/client/types/__init__.py +16 -0
  30. vellum/client/types/array_chat_message_content_item.py +4 -2
  31. vellum/client/types/array_chat_message_content_item_request.py +4 -2
  32. vellum/client/types/chat_message_content.py +4 -2
  33. vellum/client/types/chat_message_content_request.py +4 -2
  34. vellum/client/types/node_execution_span.py +2 -0
  35. vellum/client/types/prompt_block.py +4 -2
  36. vellum/client/types/vellum_value.py +4 -2
  37. vellum/client/types/vellum_value_request.py +4 -2
  38. vellum/client/types/vellum_variable_type.py +2 -1
  39. vellum/client/types/vellum_video.py +24 -0
  40. vellum/client/types/vellum_video_request.py +24 -0
  41. vellum/client/types/video_chat_message_content.py +25 -0
  42. vellum/client/types/video_chat_message_content_request.py +25 -0
  43. vellum/client/types/video_prompt_block.py +29 -0
  44. vellum/client/types/video_vellum_value.py +25 -0
  45. vellum/client/types/video_vellum_value_request.py +25 -0
  46. vellum/client/types/workflow_execution_span.py +2 -0
  47. vellum/client/types/workflow_execution_usage_calculation_fulfilled_body.py +22 -0
  48. vellum/prompts/blocks/compilation.py +22 -10
  49. vellum/types/vellum_video.py +3 -0
  50. vellum/types/vellum_video_request.py +3 -0
  51. vellum/types/video_chat_message_content.py +3 -0
  52. vellum/types/video_chat_message_content_request.py +3 -0
  53. vellum/types/video_prompt_block.py +3 -0
  54. vellum/types/video_vellum_value.py +3 -0
  55. vellum/types/video_vellum_value_request.py +3 -0
  56. vellum/types/workflow_execution_usage_calculation_fulfilled_body.py +3 -0
  57. vellum/workflows/events/workflow.py +11 -0
  58. vellum/workflows/graph/graph.py +103 -1
  59. vellum/workflows/graph/tests/test_graph.py +99 -0
  60. vellum/workflows/nodes/bases/base.py +9 -1
  61. vellum/workflows/nodes/displayable/bases/utils.py +4 -2
  62. vellum/workflows/nodes/displayable/tool_calling_node/node.py +19 -18
  63. vellum/workflows/nodes/displayable/tool_calling_node/tests/test_node.py +17 -7
  64. vellum/workflows/nodes/displayable/tool_calling_node/tests/test_utils.py +7 -7
  65. vellum/workflows/nodes/displayable/tool_calling_node/utils.py +47 -80
  66. vellum/workflows/references/environment_variable.py +10 -0
  67. vellum/workflows/runner/runner.py +18 -2
  68. vellum/workflows/state/context.py +101 -12
  69. vellum/workflows/types/definition.py +11 -1
  70. vellum/workflows/types/tests/test_definition.py +19 -0
  71. vellum/workflows/utils/vellum_variables.py +9 -5
  72. vellum/workflows/workflows/base.py +12 -5
  73. {vellum_ai-1.1.2.dist-info → vellum_ai-1.1.3.dist-info}/METADATA +1 -1
  74. {vellum_ai-1.1.2.dist-info → vellum_ai-1.1.3.dist-info}/RECORD +84 -68
  75. vellum_ee/workflows/display/nodes/vellum/code_execution_node.py +1 -1
  76. vellum_ee/workflows/display/nodes/vellum/tests/test_code_execution_node.py +55 -1
  77. vellum_ee/workflows/display/nodes/vellum/tests/test_tool_calling_node.py +15 -52
  78. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_tool_calling_node_mcp_serialization.py +15 -49
  79. vellum_ee/workflows/display/types.py +14 -1
  80. vellum_ee/workflows/display/utils/expressions.py +13 -4
  81. vellum_ee/workflows/display/workflows/base_workflow_display.py +6 -19
  82. {vellum_ai-1.1.2.dist-info → vellum_ai-1.1.3.dist-info}/LICENSE +0 -0
  83. {vellum_ai-1.1.2.dist-info → vellum_ai-1.1.3.dist-info}/WHEEL +0 -0
  84. {vellum_ai-1.1.2.dist-info → vellum_ai-1.1.3.dist-info}/entry_points.txt +0 -0
@@ -12,14 +12,6 @@ from ...types.function_definition import FunctionDefinition
12
12
  from ...types.ad_hoc_expand_meta import AdHocExpandMeta
13
13
  from ...core.request_options import RequestOptions
14
14
  from ...types.ad_hoc_execute_prompt_event import AdHocExecutePromptEvent
15
- from ...core.serialization import convert_and_respect_annotation_metadata
16
- from ...core.pydantic_utilities import parse_obj_as
17
- import json
18
- from ...errors.bad_request_error import BadRequestError
19
- from ...errors.forbidden_error import ForbiddenError
20
- from ...errors.internal_server_error import InternalServerError
21
- from json.decoder import JSONDecodeError
22
- from ...core.api_error import ApiError
23
15
  from ...core.client_wrapper import AsyncClientWrapper
24
16
  from .raw_client import AsyncRawAdHocClient
25
17
 
@@ -239,93 +231,18 @@ class AdHocClient:
239
231
  for chunk in response:
240
232
  yield chunk
241
233
  """
242
- with self._raw_client._client_wrapper.httpx_client.stream(
243
- "v1/ad-hoc/execute-prompt-stream",
244
- base_url=self._raw_client._client_wrapper.get_environment().predict,
245
- method="POST",
246
- json={
247
- "ml_model": ml_model,
248
- "input_values": convert_and_respect_annotation_metadata(
249
- object_=input_values, annotation=typing.Sequence[PromptRequestInput], direction="write"
250
- ),
251
- "input_variables": convert_and_respect_annotation_metadata(
252
- object_=input_variables, annotation=typing.Sequence[VellumVariable], direction="write"
253
- ),
254
- "parameters": convert_and_respect_annotation_metadata(
255
- object_=parameters, annotation=PromptParameters, direction="write"
256
- ),
257
- "settings": convert_and_respect_annotation_metadata(
258
- object_=settings, annotation=typing.Optional[PromptSettings], direction="write"
259
- ),
260
- "blocks": convert_and_respect_annotation_metadata(
261
- object_=blocks, annotation=typing.Sequence[PromptBlock], direction="write"
262
- ),
263
- "functions": convert_and_respect_annotation_metadata(
264
- object_=functions,
265
- annotation=typing.Optional[typing.Sequence[FunctionDefinition]],
266
- direction="write",
267
- ),
268
- "expand_meta": convert_and_respect_annotation_metadata(
269
- object_=expand_meta, annotation=typing.Optional[AdHocExpandMeta], direction="write"
270
- ),
271
- },
272
- headers={
273
- "content-type": "application/json",
274
- },
234
+ with self._raw_client.adhoc_execute_prompt_stream(
235
+ ml_model=ml_model,
236
+ input_values=input_values,
237
+ input_variables=input_variables,
238
+ parameters=parameters,
239
+ blocks=blocks,
240
+ settings=settings,
241
+ functions=functions,
242
+ expand_meta=expand_meta,
275
243
  request_options=request_options,
276
- omit=OMIT,
277
- ) as _response:
278
- try:
279
- if 200 <= _response.status_code < 300:
280
- for _text in _response.iter_lines():
281
- try:
282
- if len(_text) == 0:
283
- continue
284
- yield typing.cast(
285
- AdHocExecutePromptEvent,
286
- parse_obj_as(
287
- type_=AdHocExecutePromptEvent, # type: ignore
288
- object_=json.loads(_text),
289
- ),
290
- )
291
- except Exception:
292
- pass
293
- return
294
- _response.read()
295
- if _response.status_code == 400:
296
- raise BadRequestError(
297
- typing.cast(
298
- typing.Optional[typing.Any],
299
- parse_obj_as(
300
- type_=typing.Optional[typing.Any], # type: ignore
301
- object_=_response.json(),
302
- ),
303
- )
304
- )
305
- if _response.status_code == 403:
306
- raise ForbiddenError(
307
- typing.cast(
308
- typing.Optional[typing.Any],
309
- parse_obj_as(
310
- type_=typing.Optional[typing.Any], # type: ignore
311
- object_=_response.json(),
312
- ),
313
- )
314
- )
315
- if _response.status_code == 500:
316
- raise InternalServerError(
317
- typing.cast(
318
- typing.Optional[typing.Any],
319
- parse_obj_as(
320
- type_=typing.Optional[typing.Any], # type: ignore
321
- object_=_response.json(),
322
- ),
323
- )
324
- )
325
- _response_json = _response.json()
326
- except JSONDecodeError:
327
- raise ApiError(status_code=_response.status_code, body=_response.text)
328
- raise ApiError(status_code=_response.status_code, body=_response_json)
244
+ ) as r:
245
+ yield from r.data
329
246
 
330
247
 
331
248
  class AsyncAdHocClient:
@@ -552,90 +469,16 @@ class AsyncAdHocClient:
552
469
 
553
470
  asyncio.run(main())
554
471
  """
555
- async with self._raw_client._client_wrapper.httpx_client.stream(
556
- "v1/ad-hoc/execute-prompt-stream",
557
- base_url=self._raw_client._client_wrapper.get_environment().predict,
558
- method="POST",
559
- json={
560
- "ml_model": ml_model,
561
- "input_values": convert_and_respect_annotation_metadata(
562
- object_=input_values, annotation=typing.Sequence[PromptRequestInput], direction="write"
563
- ),
564
- "input_variables": convert_and_respect_annotation_metadata(
565
- object_=input_variables, annotation=typing.Sequence[VellumVariable], direction="write"
566
- ),
567
- "parameters": convert_and_respect_annotation_metadata(
568
- object_=parameters, annotation=PromptParameters, direction="write"
569
- ),
570
- "settings": convert_and_respect_annotation_metadata(
571
- object_=settings, annotation=typing.Optional[PromptSettings], direction="write"
572
- ),
573
- "blocks": convert_and_respect_annotation_metadata(
574
- object_=blocks, annotation=typing.Sequence[PromptBlock], direction="write"
575
- ),
576
- "functions": convert_and_respect_annotation_metadata(
577
- object_=functions,
578
- annotation=typing.Optional[typing.Sequence[FunctionDefinition]],
579
- direction="write",
580
- ),
581
- "expand_meta": convert_and_respect_annotation_metadata(
582
- object_=expand_meta, annotation=typing.Optional[AdHocExpandMeta], direction="write"
583
- ),
584
- },
585
- headers={
586
- "content-type": "application/json",
587
- },
472
+ async with self._raw_client.adhoc_execute_prompt_stream(
473
+ ml_model=ml_model,
474
+ input_values=input_values,
475
+ input_variables=input_variables,
476
+ parameters=parameters,
477
+ blocks=blocks,
478
+ settings=settings,
479
+ functions=functions,
480
+ expand_meta=expand_meta,
588
481
  request_options=request_options,
589
- omit=OMIT,
590
- ) as _response:
591
- try:
592
- if 200 <= _response.status_code < 300:
593
- async for _text in _response.aiter_lines():
594
- try:
595
- if len(_text) == 0:
596
- continue
597
- yield typing.cast(
598
- AdHocExecutePromptEvent,
599
- parse_obj_as(
600
- type_=AdHocExecutePromptEvent, # type: ignore
601
- object_=json.loads(_text),
602
- ),
603
- )
604
- except Exception:
605
- pass
606
- return
607
- await _response.aread()
608
- if _response.status_code == 400:
609
- raise BadRequestError(
610
- typing.cast(
611
- typing.Optional[typing.Any],
612
- parse_obj_as(
613
- type_=typing.Optional[typing.Any], # type: ignore
614
- object_=_response.json(),
615
- ),
616
- )
617
- )
618
- if _response.status_code == 403:
619
- raise ForbiddenError(
620
- typing.cast(
621
- typing.Optional[typing.Any],
622
- parse_obj_as(
623
- type_=typing.Optional[typing.Any], # type: ignore
624
- object_=_response.json(),
625
- ),
626
- )
627
- )
628
- if _response.status_code == 500:
629
- raise InternalServerError(
630
- typing.cast(
631
- typing.Optional[typing.Any],
632
- parse_obj_as(
633
- type_=typing.Optional[typing.Any], # type: ignore
634
- object_=_response.json(),
635
- ),
636
- )
637
- )
638
- _response_json = _response.json()
639
- except JSONDecodeError:
640
- raise ApiError(status_code=_response.status_code, body=_response.text)
641
- raise ApiError(status_code=_response.status_code, body=_response_json)
482
+ ) as r:
483
+ async for data in r.data:
484
+ yield data
@@ -19,6 +19,8 @@ from ...errors.forbidden_error import ForbiddenError
19
19
  from ...errors.internal_server_error import InternalServerError
20
20
  from json.decoder import JSONDecodeError
21
21
  from ...core.api_error import ApiError
22
+ import json
23
+ import contextlib
22
24
  from ...core.client_wrapper import AsyncClientWrapper
23
25
  from ...core.http_response import AsyncHttpResponse
24
26
 
@@ -151,6 +153,143 @@ class RawAdHocClient:
151
153
  raise ApiError(status_code=_response.status_code, body=_response.text)
152
154
  raise ApiError(status_code=_response.status_code, body=_response_json)
153
155
 
156
+ @contextlib.contextmanager
157
+ def adhoc_execute_prompt_stream(
158
+ self,
159
+ *,
160
+ ml_model: str,
161
+ input_values: typing.Sequence[PromptRequestInput],
162
+ input_variables: typing.Sequence[VellumVariable],
163
+ parameters: PromptParameters,
164
+ blocks: typing.Sequence[PromptBlock],
165
+ settings: typing.Optional[PromptSettings] = OMIT,
166
+ functions: typing.Optional[typing.Sequence[FunctionDefinition]] = OMIT,
167
+ expand_meta: typing.Optional[AdHocExpandMeta] = OMIT,
168
+ request_options: typing.Optional[RequestOptions] = None,
169
+ ) -> typing.Iterator[HttpResponse[typing.Iterator[AdHocExecutePromptEvent]]]:
170
+ """
171
+ Parameters
172
+ ----------
173
+ ml_model : str
174
+
175
+ input_values : typing.Sequence[PromptRequestInput]
176
+
177
+ input_variables : typing.Sequence[VellumVariable]
178
+
179
+ parameters : PromptParameters
180
+
181
+ blocks : typing.Sequence[PromptBlock]
182
+
183
+ settings : typing.Optional[PromptSettings]
184
+
185
+ functions : typing.Optional[typing.Sequence[FunctionDefinition]]
186
+
187
+ expand_meta : typing.Optional[AdHocExpandMeta]
188
+
189
+ request_options : typing.Optional[RequestOptions]
190
+ Request-specific configuration.
191
+
192
+ Yields
193
+ ------
194
+ typing.Iterator[HttpResponse[typing.Iterator[AdHocExecutePromptEvent]]]
195
+
196
+ """
197
+ with self._client_wrapper.httpx_client.stream(
198
+ "v1/ad-hoc/execute-prompt-stream",
199
+ base_url=self._client_wrapper.get_environment().predict,
200
+ method="POST",
201
+ json={
202
+ "ml_model": ml_model,
203
+ "input_values": convert_and_respect_annotation_metadata(
204
+ object_=input_values, annotation=typing.Sequence[PromptRequestInput], direction="write"
205
+ ),
206
+ "input_variables": convert_and_respect_annotation_metadata(
207
+ object_=input_variables, annotation=typing.Sequence[VellumVariable], direction="write"
208
+ ),
209
+ "parameters": convert_and_respect_annotation_metadata(
210
+ object_=parameters, annotation=PromptParameters, direction="write"
211
+ ),
212
+ "settings": convert_and_respect_annotation_metadata(
213
+ object_=settings, annotation=typing.Optional[PromptSettings], direction="write"
214
+ ),
215
+ "blocks": convert_and_respect_annotation_metadata(
216
+ object_=blocks, annotation=typing.Sequence[PromptBlock], direction="write"
217
+ ),
218
+ "functions": convert_and_respect_annotation_metadata(
219
+ object_=functions,
220
+ annotation=typing.Optional[typing.Sequence[FunctionDefinition]],
221
+ direction="write",
222
+ ),
223
+ "expand_meta": convert_and_respect_annotation_metadata(
224
+ object_=expand_meta, annotation=typing.Optional[AdHocExpandMeta], direction="write"
225
+ ),
226
+ },
227
+ headers={
228
+ "content-type": "application/json",
229
+ },
230
+ request_options=request_options,
231
+ omit=OMIT,
232
+ ) as _response:
233
+
234
+ def stream() -> HttpResponse[typing.Iterator[AdHocExecutePromptEvent]]:
235
+ try:
236
+ if 200 <= _response.status_code < 300:
237
+
238
+ def _iter():
239
+ for _text in _response.iter_lines():
240
+ try:
241
+ if len(_text) == 0:
242
+ continue
243
+ yield typing.cast(
244
+ AdHocExecutePromptEvent,
245
+ parse_obj_as(
246
+ type_=AdHocExecutePromptEvent, # type: ignore
247
+ object_=json.loads(_text),
248
+ ),
249
+ )
250
+ except Exception:
251
+ pass
252
+ return
253
+
254
+ return HttpResponse(response=_response, data=_iter())
255
+ _response.read()
256
+ if _response.status_code == 400:
257
+ raise BadRequestError(
258
+ typing.cast(
259
+ typing.Optional[typing.Any],
260
+ parse_obj_as(
261
+ type_=typing.Optional[typing.Any], # type: ignore
262
+ object_=_response.json(),
263
+ ),
264
+ )
265
+ )
266
+ if _response.status_code == 403:
267
+ raise ForbiddenError(
268
+ typing.cast(
269
+ typing.Optional[typing.Any],
270
+ parse_obj_as(
271
+ type_=typing.Optional[typing.Any], # type: ignore
272
+ object_=_response.json(),
273
+ ),
274
+ )
275
+ )
276
+ if _response.status_code == 500:
277
+ raise InternalServerError(
278
+ typing.cast(
279
+ typing.Optional[typing.Any],
280
+ parse_obj_as(
281
+ type_=typing.Optional[typing.Any], # type: ignore
282
+ object_=_response.json(),
283
+ ),
284
+ )
285
+ )
286
+ _response_json = _response.json()
287
+ except JSONDecodeError:
288
+ raise ApiError(status_code=_response.status_code, body=_response.text)
289
+ raise ApiError(status_code=_response.status_code, body=_response_json)
290
+
291
+ yield stream()
292
+
154
293
 
155
294
  class AsyncRawAdHocClient:
156
295
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -276,3 +415,140 @@ class AsyncRawAdHocClient:
276
415
  except JSONDecodeError:
277
416
  raise ApiError(status_code=_response.status_code, body=_response.text)
278
417
  raise ApiError(status_code=_response.status_code, body=_response_json)
418
+
419
+ @contextlib.asynccontextmanager
420
+ async def adhoc_execute_prompt_stream(
421
+ self,
422
+ *,
423
+ ml_model: str,
424
+ input_values: typing.Sequence[PromptRequestInput],
425
+ input_variables: typing.Sequence[VellumVariable],
426
+ parameters: PromptParameters,
427
+ blocks: typing.Sequence[PromptBlock],
428
+ settings: typing.Optional[PromptSettings] = OMIT,
429
+ functions: typing.Optional[typing.Sequence[FunctionDefinition]] = OMIT,
430
+ expand_meta: typing.Optional[AdHocExpandMeta] = OMIT,
431
+ request_options: typing.Optional[RequestOptions] = None,
432
+ ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AdHocExecutePromptEvent]]]:
433
+ """
434
+ Parameters
435
+ ----------
436
+ ml_model : str
437
+
438
+ input_values : typing.Sequence[PromptRequestInput]
439
+
440
+ input_variables : typing.Sequence[VellumVariable]
441
+
442
+ parameters : PromptParameters
443
+
444
+ blocks : typing.Sequence[PromptBlock]
445
+
446
+ settings : typing.Optional[PromptSettings]
447
+
448
+ functions : typing.Optional[typing.Sequence[FunctionDefinition]]
449
+
450
+ expand_meta : typing.Optional[AdHocExpandMeta]
451
+
452
+ request_options : typing.Optional[RequestOptions]
453
+ Request-specific configuration.
454
+
455
+ Yields
456
+ ------
457
+ typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AdHocExecutePromptEvent]]]
458
+
459
+ """
460
+ async with self._client_wrapper.httpx_client.stream(
461
+ "v1/ad-hoc/execute-prompt-stream",
462
+ base_url=self._client_wrapper.get_environment().predict,
463
+ method="POST",
464
+ json={
465
+ "ml_model": ml_model,
466
+ "input_values": convert_and_respect_annotation_metadata(
467
+ object_=input_values, annotation=typing.Sequence[PromptRequestInput], direction="write"
468
+ ),
469
+ "input_variables": convert_and_respect_annotation_metadata(
470
+ object_=input_variables, annotation=typing.Sequence[VellumVariable], direction="write"
471
+ ),
472
+ "parameters": convert_and_respect_annotation_metadata(
473
+ object_=parameters, annotation=PromptParameters, direction="write"
474
+ ),
475
+ "settings": convert_and_respect_annotation_metadata(
476
+ object_=settings, annotation=typing.Optional[PromptSettings], direction="write"
477
+ ),
478
+ "blocks": convert_and_respect_annotation_metadata(
479
+ object_=blocks, annotation=typing.Sequence[PromptBlock], direction="write"
480
+ ),
481
+ "functions": convert_and_respect_annotation_metadata(
482
+ object_=functions,
483
+ annotation=typing.Optional[typing.Sequence[FunctionDefinition]],
484
+ direction="write",
485
+ ),
486
+ "expand_meta": convert_and_respect_annotation_metadata(
487
+ object_=expand_meta, annotation=typing.Optional[AdHocExpandMeta], direction="write"
488
+ ),
489
+ },
490
+ headers={
491
+ "content-type": "application/json",
492
+ },
493
+ request_options=request_options,
494
+ omit=OMIT,
495
+ ) as _response:
496
+
497
+ async def stream() -> AsyncHttpResponse[typing.AsyncIterator[AdHocExecutePromptEvent]]:
498
+ try:
499
+ if 200 <= _response.status_code < 300:
500
+
501
+ async def _iter():
502
+ async for _text in _response.aiter_lines():
503
+ try:
504
+ if len(_text) == 0:
505
+ continue
506
+ yield typing.cast(
507
+ AdHocExecutePromptEvent,
508
+ parse_obj_as(
509
+ type_=AdHocExecutePromptEvent, # type: ignore
510
+ object_=json.loads(_text),
511
+ ),
512
+ )
513
+ except Exception:
514
+ pass
515
+ return
516
+
517
+ return AsyncHttpResponse(response=_response, data=_iter())
518
+ await _response.aread()
519
+ if _response.status_code == 400:
520
+ raise BadRequestError(
521
+ typing.cast(
522
+ typing.Optional[typing.Any],
523
+ parse_obj_as(
524
+ type_=typing.Optional[typing.Any], # type: ignore
525
+ object_=_response.json(),
526
+ ),
527
+ )
528
+ )
529
+ if _response.status_code == 403:
530
+ raise ForbiddenError(
531
+ typing.cast(
532
+ typing.Optional[typing.Any],
533
+ parse_obj_as(
534
+ type_=typing.Optional[typing.Any], # type: ignore
535
+ object_=_response.json(),
536
+ ),
537
+ )
538
+ )
539
+ if _response.status_code == 500:
540
+ raise InternalServerError(
541
+ typing.cast(
542
+ typing.Optional[typing.Any],
543
+ parse_obj_as(
544
+ type_=typing.Optional[typing.Any], # type: ignore
545
+ object_=_response.json(),
546
+ ),
547
+ )
548
+ )
549
+ _response_json = _response.json()
550
+ except JSONDecodeError:
551
+ raise ApiError(status_code=_response.status_code, body=_response.text)
552
+ raise ApiError(status_code=_response.status_code, body=_response_json)
553
+
554
+ yield await stream()
@@ -33,7 +33,7 @@ class ContainerImagesClient:
33
33
  """
34
34
  return self._raw_client
35
35
 
36
- def list_(
36
+ def list(
37
37
  self,
38
38
  *,
39
39
  limit: typing.Optional[int] = None,
@@ -73,12 +73,7 @@ class ContainerImagesClient:
73
73
  )
74
74
  client.container_images.list()
75
75
  """
76
- response = self._raw_client.list(
77
- limit=limit,
78
- offset=offset,
79
- ordering=ordering,
80
- request_options=request_options,
81
- )
76
+ response = self._raw_client.list(limit=limit, offset=offset, ordering=ordering, request_options=request_options)
82
77
  return response.data
83
78
 
84
79
  def retrieve(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> ContainerImageRead:
@@ -110,10 +105,7 @@ class ContainerImagesClient:
110
105
  id="id",
111
106
  )
112
107
  """
113
- response = self._raw_client.retrieve(
114
- id,
115
- request_options=request_options,
116
- )
108
+ response = self._raw_client.retrieve(id, request_options=request_options)
117
109
  return response.data
118
110
 
119
111
  def docker_service_token(self, *, request_options: typing.Optional[RequestOptions] = None) -> DockerServiceToken:
@@ -138,9 +130,7 @@ class ContainerImagesClient:
138
130
  )
139
131
  client.container_images.docker_service_token()
140
132
  """
141
- response = self._raw_client.docker_service_token(
142
- request_options=request_options,
143
- )
133
+ response = self._raw_client.docker_service_token(request_options=request_options)
144
134
  return response.data
145
135
 
146
136
  def push_container_image(
@@ -182,12 +172,7 @@ class ContainerImagesClient:
182
172
  tags=["tags", "tags"],
183
173
  )
184
174
  """
185
- response = self._raw_client.push_container_image(
186
- name=name,
187
- sha=sha,
188
- tags=tags,
189
- request_options=request_options,
190
- )
175
+ response = self._raw_client.push_container_image(name=name, sha=sha, tags=tags, request_options=request_options)
191
176
  return response.data
192
177
 
193
178
 
@@ -206,7 +191,7 @@ class AsyncContainerImagesClient:
206
191
  """
207
192
  return self._raw_client
208
193
 
209
- async def list_(
194
+ async def list(
210
195
  self,
211
196
  *,
212
197
  limit: typing.Optional[int] = None,
@@ -255,10 +240,7 @@ class AsyncContainerImagesClient:
255
240
  asyncio.run(main())
256
241
  """
257
242
  response = await self._raw_client.list(
258
- limit=limit,
259
- offset=offset,
260
- ordering=ordering,
261
- request_options=request_options,
243
+ limit=limit, offset=offset, ordering=ordering, request_options=request_options
262
244
  )
263
245
  return response.data
264
246
 
@@ -299,10 +281,7 @@ class AsyncContainerImagesClient:
299
281
 
300
282
  asyncio.run(main())
301
283
  """
302
- response = await self._raw_client.retrieve(
303
- id,
304
- request_options=request_options,
305
- )
284
+ response = await self._raw_client.retrieve(id, request_options=request_options)
306
285
  return response.data
307
286
 
308
287
  async def docker_service_token(
@@ -337,9 +316,7 @@ class AsyncContainerImagesClient:
337
316
 
338
317
  asyncio.run(main())
339
318
  """
340
- response = await self._raw_client.docker_service_token(
341
- request_options=request_options,
342
- )
319
+ response = await self._raw_client.docker_service_token(request_options=request_options)
343
320
  return response.data
344
321
 
345
322
  async def push_container_image(
@@ -390,9 +367,6 @@ class AsyncContainerImagesClient:
390
367
  asyncio.run(main())
391
368
  """
392
369
  response = await self._raw_client.push_container_image(
393
- name=name,
394
- sha=sha,
395
- tags=tags,
396
- request_options=request_options,
370
+ name=name, sha=sha, tags=tags, request_options=request_options
397
371
  )
398
372
  return response.data