vellum-ai 0.14.41__py3-none-any.whl → 0.14.43__py3-none-any.whl

This diff compares the contents of two publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in their public registry.
Files changed (69)
  1. vellum/__init__.py +4 -4
  2. vellum/client/core/client_wrapper.py +2 -2
  3. vellum/client/reference.md +110 -3
  4. vellum/client/resources/documents/client.py +0 -6
  5. vellum/client/resources/prompts/client.py +228 -1
  6. vellum/client/types/__init__.py +4 -4
  7. vellum/client/types/deployment_read.py +2 -2
  8. vellum/client/types/execute_api_response.py +3 -4
  9. vellum/client/types/execute_api_response_json.py +7 -0
  10. vellum/client/types/{workflow_event_display_context.py → prompt_push_response.py} +4 -12
  11. vellum/client/types/prompt_settings.py +1 -0
  12. vellum/client/types/workflow_event_execution_read.py +0 -4
  13. vellum/client/types/workflow_execution_initiated_body.py +0 -9
  14. vellum/client/types/workflow_execution_initiated_event.py +0 -4
  15. vellum/client/types/workflow_execution_span.py +0 -4
  16. vellum/types/{node_event_display_context.py → execute_api_response_json.py} +1 -1
  17. vellum/types/{workflow_event_display_context.py → prompt_push_response.py} +1 -1
  18. vellum/workflows/inputs/base.py +26 -3
  19. vellum/workflows/inputs/tests/test_inputs.py +15 -0
  20. vellum/workflows/nodes/bases/base.py +0 -3
  21. vellum/workflows/nodes/bases/base_adornment_node.py +9 -0
  22. vellum/workflows/nodes/bases/tests/test_base_adornment_node.py +31 -0
  23. vellum/workflows/nodes/core/map_node/node.py +3 -2
  24. vellum/workflows/nodes/core/map_node/tests/test_node.py +56 -0
  25. vellum/workflows/nodes/core/retry_node/node.py +2 -1
  26. vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py +62 -13
  27. vellum/workflows/nodes/displayable/bases/inline_prompt_node/tests/test_inline_prompt_node.py +177 -0
  28. vellum/workflows/nodes/experimental/tool_calling_node/node.py +3 -6
  29. vellum/workflows/nodes/experimental/tool_calling_node/utils.py +18 -15
  30. vellum/workflows/nodes/utils.py +14 -1
  31. vellum/workflows/references/output.py +1 -1
  32. vellum/workflows/references/workflow_input.py +5 -1
  33. vellum/workflows/runner/runner.py +2 -0
  34. vellum/workflows/workflows/base.py +5 -0
  35. {vellum_ai-0.14.41.dist-info → vellum_ai-0.14.43.dist-info}/METADATA +1 -1
  36. {vellum_ai-0.14.41.dist-info → vellum_ai-0.14.43.dist-info}/RECORD +68 -67
  37. vellum_cli/pull.py +7 -0
  38. vellum_cli/tests/test_pull.py +23 -0
  39. vellum_ee/workflows/display/nodes/base_node_display.py +32 -23
  40. vellum_ee/workflows/display/nodes/vellum/api_node.py +1 -0
  41. vellum_ee/workflows/display/nodes/vellum/code_execution_node.py +1 -0
  42. vellum_ee/workflows/display/nodes/vellum/conditional_node.py +1 -0
  43. vellum_ee/workflows/display/nodes/vellum/final_output_node.py +6 -6
  44. vellum_ee/workflows/display/nodes/vellum/guardrail_node.py +1 -0
  45. vellum_ee/workflows/display/nodes/vellum/inline_prompt_node.py +1 -0
  46. vellum_ee/workflows/display/nodes/vellum/inline_subworkflow_node.py +1 -0
  47. vellum_ee/workflows/display/nodes/vellum/map_node.py +15 -12
  48. vellum_ee/workflows/display/nodes/vellum/merge_node.py +1 -0
  49. vellum_ee/workflows/display/nodes/vellum/prompt_deployment_node.py +1 -0
  50. vellum_ee/workflows/display/nodes/vellum/search_node.py +1 -0
  51. vellum_ee/workflows/display/nodes/vellum/subworkflow_deployment_node.py +1 -0
  52. vellum_ee/workflows/display/nodes/vellum/templating_node.py +1 -0
  53. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_api_node_serialization.py +1 -0
  54. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_code_execution_node_serialization.py +3 -0
  55. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_conditional_node_serialization.py +138 -0
  56. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_guardrail_node_serialization.py +1 -0
  57. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_subworkflow_serialization.py +1 -0
  58. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_map_node_serialization.py +3 -2
  59. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_merge_node_serialization.py +1 -0
  60. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_prompt_deployment_serialization.py +1 -0
  61. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_search_node_serialization.py +1 -0
  62. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_subworkflow_deployment_serialization.py +1 -0
  63. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_templating_node_serialization.py +1 -0
  64. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_terminal_node_serialization.py +2 -2
  65. vellum_ee/workflows/display/tests/workflow_serialization/test_complex_terminal_node_serialization.py +2 -2
  66. vellum/client/types/node_event_display_context.py +0 -30
  67. {vellum_ai-0.14.41.dist-info → vellum_ai-0.14.43.dist-info}/LICENSE +0 -0
  68. {vellum_ai-0.14.41.dist-info → vellum_ai-0.14.43.dist-info}/WHEEL +0 -0
  69. {vellum_ai-0.14.41.dist-info → vellum_ai-0.14.43.dist-info}/entry_points.txt +0 -0
vellum/__init__.py CHANGED
@@ -105,6 +105,7 @@ from .types import (
  ExecuteApiRequestBody,
  ExecuteApiRequestHeadersValue,
  ExecuteApiResponse,
+ ExecuteApiResponseJson,
  ExecutePromptEvent,
  ExecutePromptResponse,
  ExecuteWorkflowResponse,
@@ -229,7 +230,6 @@ from .types import (
  NamedTestCaseVariableValue,
  NamedTestCaseVariableValueRequest,
  NewMemberJoinBehaviorEnum,
- NodeEventDisplayContext,
  NodeExecutionFulfilledBody,
  NodeExecutionFulfilledEvent,
  NodeExecutionInitiatedBody,
@@ -310,6 +310,7 @@ from .types import (
  PromptNodeResultData,
  PromptOutput,
  PromptParameters,
+ PromptPushResponse,
  PromptRequestChatHistoryInput,
  PromptRequestInput,
  PromptRequestJsonInput,
@@ -518,7 +519,6 @@ from .types import (
  WorkflowDeploymentReleaseWorkflowDeployment,
  WorkflowDeploymentReleaseWorkflowVersion,
  WorkflowError,
- WorkflowEventDisplayContext,
  WorkflowEventError,
  WorkflowEventExecutionRead,
  WorkflowExecutionActual,
@@ -728,6 +728,7 @@ __all__ = [
  "ExecuteApiRequestBody",
  "ExecuteApiRequestHeadersValue",
  "ExecuteApiResponse",
+ "ExecuteApiResponseJson",
  "ExecutePromptEvent",
  "ExecutePromptResponse",
  "ExecuteWorkflowResponse",
@@ -858,7 +859,6 @@ __all__ = [
  "NamedTestCaseVariableValue",
  "NamedTestCaseVariableValueRequest",
  "NewMemberJoinBehaviorEnum",
- "NodeEventDisplayContext",
  "NodeExecutionFulfilledBody",
  "NodeExecutionFulfilledEvent",
  "NodeExecutionInitiatedBody",
@@ -940,6 +940,7 @@ __all__ = [
  "PromptNodeResultData",
  "PromptOutput",
  "PromptParameters",
+ "PromptPushResponse",
  "PromptRequestChatHistoryInput",
  "PromptRequestInput",
  "PromptRequestJsonInput",
@@ -1151,7 +1152,6 @@ __all__ = [
  "WorkflowDeploymentReleaseWorkflowVersion",
  "WorkflowDeploymentsListRequestStatus",
  "WorkflowError",
- "WorkflowEventDisplayContext",
  "WorkflowEventError",
  "WorkflowEventExecutionRead",
  "WorkflowExecutionActual",
vellum/client/core/client_wrapper.py CHANGED
@@ -18,9 +18,9 @@ class BaseClientWrapper:
  headers: typing.Dict[str, str] = {
  "X-Fern-Language": "Python",
  "X-Fern-SDK-Name": "vellum-ai",
- "X-Fern-SDK-Version": "0.14.41",
+ "X-Fern-SDK-Version": "0.14.43",
  }
- headers["X_API_KEY"] = self.api_key
+ headers["X-API-KEY"] = self.api_key
  return headers

  def get_environment(self) -> VellumEnvironment:
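
Two things change here: the reported SDK version and the spelling of the API key header, which is now sent as `X-API-KEY` rather than `X_API_KEY`. A standalone sketch of the resulting header set; the `build_headers` helper below is illustrative only and not part of the SDK:

```python
import typing


def build_headers(api_key: str) -> typing.Dict[str, str]:
    # Mirrors BaseClientWrapper.get_headers() as of 0.14.43.
    headers: typing.Dict[str, str] = {
        "X-Fern-Language": "Python",
        "X-Fern-SDK-Name": "vellum-ai",
        "X-Fern-SDK-Version": "0.14.43",
    }
    headers["X-API-KEY"] = api_key  # was headers["X_API_KEY"] in 0.14.41
    return headers
```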
vellum/client/reference.md CHANGED
@@ -3408,7 +3408,7 @@ client.documents.retrieve(
  <dl>
  <dd>

- **id:** `str` — A UUID string identifying this document.
+ **id:** `str`

  </dd>
  </dl>
@@ -3478,7 +3478,7 @@ client.documents.destroy(
  <dl>
  <dd>

- **id:** `str` — A UUID string identifying this document.
+ **id:** `str`

  </dd>
  </dl>
@@ -3548,7 +3548,7 @@ client.documents.partial_update(
  <dl>
  <dd>

- **id:** `str` — A UUID string identifying this document.
+ **id:** `str`

  </dd>
  </dl>
@@ -4266,6 +4266,113 @@ client.prompts.pull(
  </dl>


+ </dd>
+ </dl>
+ </details>
+
+ <details><summary><code>client.prompts.<a href="src/vellum/resources/prompts/client.py">push</a>(...)</code></summary>
+ <dl>
+ <dd>
+
+ #### 📝 Description
+
+ <dl>
+ <dd>
+
+ <dl>
+ <dd>
+
+ Used to push updates to a Prompt in Vellum.
+ </dd>
+ </dl>
+ </dd>
+ </dl>
+
+ #### 🔌 Usage
+
+ <dl>
+ <dd>
+
+ <dl>
+ <dd>
+
+ ```python
+ from vellum import (
+ JinjaPromptBlock,
+ PromptExecConfig,
+ PromptParameters,
+ Vellum,
+ VellumVariable,
+ )
+
+ client = Vellum(
+ api_key="YOUR_API_KEY",
+ )
+ client.prompts.push(
+ exec_config=PromptExecConfig(
+ ml_model="ml_model",
+ input_variables=[
+ VellumVariable(
+ id="id",
+ key="key",
+ type="STRING",
+ )
+ ],
+ parameters=PromptParameters(),
+ blocks=[
+ JinjaPromptBlock(
+ template="template",
+ )
+ ],
+ ),
+ )
+
+ ```
+ </dd>
+ </dl>
+ </dd>
+ </dl>
+
+ #### ⚙️ Parameters
+
+ <dl>
+ <dd>
+
+ <dl>
+ <dd>
+
+ **exec_config:** `PromptExecConfig`
+
+ </dd>
+ </dl>
+
+ <dl>
+ <dd>
+
+ **prompt_variant_id:** `typing.Optional[str]`
+
+ </dd>
+ </dl>
+
+ <dl>
+ <dd>
+
+ **prompt_sandbox_id:** `typing.Optional[str]`
+
+ </dd>
+ </dl>
+
+ <dl>
+ <dd>
+
+ **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+ </dd>
+ </dl>
+ </dd>
+ </dl>
+
+
  </dd>
  </dl>
  </details>
vellum/client/resources/documents/client.py CHANGED
@@ -106,7 +106,6 @@ class DocumentsClient:
  Parameters
  ----------
  id : str
- A UUID string identifying this document.

  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.
@@ -154,7 +153,6 @@ class DocumentsClient:
  Parameters
  ----------
  id : str
- A UUID string identifying this document.

  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.
@@ -203,7 +201,6 @@ class DocumentsClient:
  Parameters
  ----------
  id : str
- A UUID string identifying this document.

  label : typing.Optional[str]
  A human-readable label for the document. Defaults to the originally uploaded file's file name.
@@ -471,7 +468,6 @@ class AsyncDocumentsClient:
  Parameters
  ----------
  id : str
- A UUID string identifying this document.

  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.
@@ -527,7 +523,6 @@ class AsyncDocumentsClient:
  Parameters
  ----------
  id : str
- A UUID string identifying this document.

  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.
@@ -584,7 +579,6 @@ class AsyncDocumentsClient:
  Parameters
  ----------
  id : str
- A UUID string identifying this document.

  label : typing.Optional[str]
  A human-readable label for the document. Defaults to the originally uploaded file's file name.
vellum/client/resources/prompts/client.py CHANGED
@@ -1,7 +1,7 @@
  # This file was auto-generated by Fern from our API Definition.

- from ...core.client_wrapper import SyncClientWrapper
  import typing
+ from ...core.client_wrapper import SyncClientWrapper
  from ...core.request_options import RequestOptions
  from ...types.prompt_exec_config import PromptExecConfig
  from ...core.jsonable_encoder import jsonable_encoder
@@ -10,8 +10,13 @@ from ...errors.bad_request_error import BadRequestError
  from ...errors.not_found_error import NotFoundError
  from json.decoder import JSONDecodeError
  from ...core.api_error import ApiError
+ from ...types.prompt_push_response import PromptPushResponse
+ from ...core.serialization import convert_and_respect_annotation_metadata
  from ...core.client_wrapper import AsyncClientWrapper

+ # this is used as the default value for optional parameters
+ OMIT = typing.cast(typing.Any, ...)
+

  class PromptsClient:
  def __init__(self, *, client_wrapper: SyncClientWrapper):
@@ -100,6 +105,113 @@ class PromptsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ def push(
+ self,
+ *,
+ exec_config: PromptExecConfig,
+ prompt_variant_id: typing.Optional[str] = OMIT,
+ prompt_sandbox_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> PromptPushResponse:
+ """
+ Used to push updates to a Prompt in Vellum.
+
+ Parameters
+ ----------
+ exec_config : PromptExecConfig
+
+ prompt_variant_id : typing.Optional[str]
+
+ prompt_sandbox_id : typing.Optional[str]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PromptPushResponse
+
+
+ Examples
+ --------
+ from vellum import (
+ JinjaPromptBlock,
+ PromptExecConfig,
+ PromptParameters,
+ Vellum,
+ VellumVariable,
+ )
+
+ client = Vellum(
+ api_key="YOUR_API_KEY",
+ )
+ client.prompts.push(
+ exec_config=PromptExecConfig(
+ ml_model="ml_model",
+ input_variables=[
+ VellumVariable(
+ id="id",
+ key="key",
+ type="STRING",
+ )
+ ],
+ parameters=PromptParameters(),
+ blocks=[
+ JinjaPromptBlock(
+ template="template",
+ )
+ ],
+ ),
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v1/prompts/push",
+ base_url=self._client_wrapper.get_environment().default,
+ method="POST",
+ json={
+ "exec_config": convert_and_respect_annotation_metadata(
+ object_=exec_config, annotation=PromptExecConfig, direction="write"
+ ),
+ "prompt_variant_id": prompt_variant_id,
+ "prompt_sandbox_id": prompt_sandbox_id,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ PromptPushResponse,
+ parse_obj_as(
+ type_=PromptPushResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(
+ typing.Optional[typing.Any],
+ parse_obj_as(
+ type_=typing.Optional[typing.Any], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ if _response.status_code == 404:
+ raise NotFoundError(
+ typing.cast(
+ typing.Optional[typing.Any],
+ parse_obj_as(
+ type_=typing.Optional[typing.Any], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+

  class AsyncPromptsClient:
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -195,3 +307,118 @@ class AsyncPromptsClient:
  except JSONDecodeError:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def push(
+ self,
+ *,
+ exec_config: PromptExecConfig,
+ prompt_variant_id: typing.Optional[str] = OMIT,
+ prompt_sandbox_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> PromptPushResponse:
+ """
+ Used to push updates to a Prompt in Vellum.
+
+ Parameters
+ ----------
+ exec_config : PromptExecConfig
+
+ prompt_variant_id : typing.Optional[str]
+
+ prompt_sandbox_id : typing.Optional[str]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PromptPushResponse
+
+
+ Examples
+ --------
+ import asyncio
+
+ from vellum import (
+ AsyncVellum,
+ JinjaPromptBlock,
+ PromptExecConfig,
+ PromptParameters,
+ VellumVariable,
+ )
+
+ client = AsyncVellum(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.prompts.push(
+ exec_config=PromptExecConfig(
+ ml_model="ml_model",
+ input_variables=[
+ VellumVariable(
+ id="id",
+ key="key",
+ type="STRING",
+ )
+ ],
+ parameters=PromptParameters(),
+ blocks=[
+ JinjaPromptBlock(
+ template="template",
+ )
+ ],
+ ),
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v1/prompts/push",
+ base_url=self._client_wrapper.get_environment().default,
+ method="POST",
+ json={
+ "exec_config": convert_and_respect_annotation_metadata(
+ object_=exec_config, annotation=PromptExecConfig, direction="write"
+ ),
+ "prompt_variant_id": prompt_variant_id,
+ "prompt_sandbox_id": prompt_sandbox_id,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ PromptPushResponse,
+ parse_obj_as(
+ type_=PromptPushResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(
+ typing.Optional[typing.Any],
+ parse_obj_as(
+ type_=typing.Optional[typing.Any], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ if _response.status_code == 404:
+ raise NotFoundError(
+ typing.cast(
+ typing.Optional[typing.Any],
+ parse_obj_as(
+ type_=typing.Optional[typing.Any], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
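
The generated examples above call `push` without capturing its return value. Below is a hedged usage sketch that also reads the resulting `PromptPushResponse`; the model name, variable, template, and sandbox id are placeholders, not values taken from this diff.

```python
from vellum import JinjaPromptBlock, PromptExecConfig, PromptParameters, Vellum, VellumVariable

client = Vellum(api_key="YOUR_API_KEY")

response = client.prompts.push(
    exec_config=PromptExecConfig(
        ml_model="gpt-4o",  # placeholder model name
        input_variables=[VellumVariable(id="1", key="question", type="STRING")],
        parameters=PromptParameters(),
        blocks=[JinjaPromptBlock(template="Answer the question: {{ question }}")],
    ),
    prompt_sandbox_id="PLACEHOLDER_SANDBOX_ID",  # optional, as is prompt_variant_id
)

# PromptPushResponse identifies the prompt variant and sandbox that were written.
print(response.prompt_variant_id, response.prompt_sandbox_id)
```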
vellum/client/types/__init__.py CHANGED
@@ -109,6 +109,7 @@ from .execute_api_request_bearer_token import ExecuteApiRequestBearerToken
  from .execute_api_request_body import ExecuteApiRequestBody
  from .execute_api_request_headers_value import ExecuteApiRequestHeadersValue
  from .execute_api_response import ExecuteApiResponse
+ from .execute_api_response_json import ExecuteApiResponseJson
  from .execute_prompt_event import ExecutePromptEvent
  from .execute_prompt_response import ExecutePromptResponse
  from .execute_workflow_response import ExecuteWorkflowResponse
@@ -237,7 +238,6 @@ from .named_test_case_string_variable_value_request import NamedTestCaseStringVa
  from .named_test_case_variable_value import NamedTestCaseVariableValue
  from .named_test_case_variable_value_request import NamedTestCaseVariableValueRequest
  from .new_member_join_behavior_enum import NewMemberJoinBehaviorEnum
- from .node_event_display_context import NodeEventDisplayContext
  from .node_execution_fulfilled_body import NodeExecutionFulfilledBody
  from .node_execution_fulfilled_event import NodeExecutionFulfilledEvent
  from .node_execution_initiated_body import NodeExecutionInitiatedBody
@@ -318,6 +318,7 @@ from .prompt_node_result import PromptNodeResult
  from .prompt_node_result_data import PromptNodeResultData
  from .prompt_output import PromptOutput
  from .prompt_parameters import PromptParameters
+ from .prompt_push_response import PromptPushResponse
  from .prompt_request_chat_history_input import PromptRequestChatHistoryInput
  from .prompt_request_input import PromptRequestInput
  from .prompt_request_json_input import PromptRequestJsonInput
@@ -542,7 +543,6 @@ from .workflow_deployment_release import WorkflowDeploymentRelease
  from .workflow_deployment_release_workflow_deployment import WorkflowDeploymentReleaseWorkflowDeployment
  from .workflow_deployment_release_workflow_version import WorkflowDeploymentReleaseWorkflowVersion
  from .workflow_error import WorkflowError
- from .workflow_event_display_context import WorkflowEventDisplayContext
  from .workflow_event_error import WorkflowEventError
  from .workflow_event_execution_read import WorkflowEventExecutionRead
  from .workflow_execution_actual import WorkflowExecutionActual
@@ -715,6 +715,7 @@ __all__ = [
  "ExecuteApiRequestBody",
  "ExecuteApiRequestHeadersValue",
  "ExecuteApiResponse",
+ "ExecuteApiResponseJson",
  "ExecutePromptEvent",
  "ExecutePromptResponse",
  "ExecuteWorkflowResponse",
@@ -839,7 +840,6 @@ __all__ = [
  "NamedTestCaseVariableValue",
  "NamedTestCaseVariableValueRequest",
  "NewMemberJoinBehaviorEnum",
- "NodeEventDisplayContext",
  "NodeExecutionFulfilledBody",
  "NodeExecutionFulfilledEvent",
  "NodeExecutionInitiatedBody",
@@ -920,6 +920,7 @@ __all__ = [
  "PromptNodeResultData",
  "PromptOutput",
  "PromptParameters",
+ "PromptPushResponse",
  "PromptRequestChatHistoryInput",
  "PromptRequestInput",
  "PromptRequestJsonInput",
@@ -1128,7 +1129,6 @@ __all__ = [
  "WorkflowDeploymentReleaseWorkflowDeployment",
  "WorkflowDeploymentReleaseWorkflowVersion",
  "WorkflowError",
- "WorkflowEventDisplayContext",
  "WorkflowEventError",
  "WorkflowEventExecutionRead",
  "WorkflowExecutionActual",
vellum/client/types/deployment_read.py CHANGED
@@ -50,9 +50,9 @@ class DeploymentRead(UniversalBaseModel):
  A human-readable description of the deployment
  """

- active_model_version_ids: typing.List[str] = pydantic.Field()
+ active_model_version_ids: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
  """
- Deprecated. The Prompt execution endpoints return a `prompt_version_id` that could be used instead.
+ Deprecated. This now always returns a null value.
  """

  last_deployed_history_item_id: str = pydantic.Field()
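
Since `active_model_version_ids` is now optional and, per the updated docstring, always comes back null, downstream code should stop relying on it. A defensive sketch, assuming `DeploymentRead` is re-exported from the top-level `vellum` package like the other generated types; `deployment` stands in for an instance fetched from the API:

```python
from typing import List

from vellum import DeploymentRead


def model_version_ids(deployment: DeploymentRead) -> List[str]:
    # Deprecated field: fall back to an empty list now that None is returned.
    return deployment.active_model_version_ids or []
```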
vellum/client/types/execute_api_response.py CHANGED
@@ -2,8 +2,9 @@

  from ..core.pydantic_utilities import UniversalBaseModel
  import typing_extensions
- import typing
+ from .execute_api_response_json import ExecuteApiResponseJson
  from ..core.serialization import FieldMetadata
+ import typing
  from ..core.pydantic_utilities import IS_PYDANTIC_V2
  import pydantic

@@ -11,9 +12,7 @@ import pydantic
  class ExecuteApiResponse(UniversalBaseModel):
  status_code: int
  text: str
- json_: typing_extensions.Annotated[
- typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="json")
- ] = None
+ json_: typing_extensions.Annotated[ExecuteApiResponseJson, FieldMetadata(alias="json")]
  headers: typing.Dict[str, str]

  if IS_PYDANTIC_V2:
vellum/client/types/execute_api_response_json.py ADDED
@@ -0,0 +1,7 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ ExecuteApiResponseJson = typing.Union[
+ typing.Dict[str, typing.Optional[typing.Any]], typing.List[typing.Optional[typing.Any]]
+ ]
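
The new alias widens the `json` field of `ExecuteApiResponse` from a JSON object to a JSON object or array, so consuming code should handle both shapes. A small sketch; `first_item` is a hypothetical helper, not part of the SDK:

```python
import typing

from vellum import ExecuteApiResponseJson


def first_item(body: ExecuteApiResponseJson) -> typing.Optional[typing.Any]:
    # Handles both members of the union: dict -> first value, list -> first element.
    if isinstance(body, dict):
        return next(iter(body.values()), None)
    return body[0] if body else None


print(first_item({"status": "ok"}))  # "ok"
print(first_item([1, 2, 3]))         # 1
```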
vellum/client/types/{workflow_event_display_context.py → prompt_push_response.py} RENAMED
@@ -1,17 +1,14 @@
  # This file was auto-generated by Fern from our API Definition.

- from __future__ import annotations
  from ..core.pydantic_utilities import UniversalBaseModel
- import typing
  from ..core.pydantic_utilities import IS_PYDANTIC_V2
+ import typing
  import pydantic
- from ..core.pydantic_utilities import update_forward_refs


- class WorkflowEventDisplayContext(UniversalBaseModel):
- node_displays: typing.Dict[str, "NodeEventDisplayContext"]
- workflow_inputs: typing.Dict[str, str]
- workflow_outputs: typing.Dict[str, str]
+ class PromptPushResponse(UniversalBaseModel):
+ prompt_variant_id: str
+ prompt_sandbox_id: str

  if IS_PYDANTIC_V2:
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
@@ -21,8 +18,3 @@ class WorkflowEventDisplayContext(UniversalBaseModel):
  frozen = True
  smart_union = True
  extra = pydantic.Extra.allow
-
-
- from .node_event_display_context import NodeEventDisplayContext # noqa: E402
-
- update_forward_refs(WorkflowEventDisplayContext)
vellum/client/types/prompt_settings.py CHANGED
@@ -8,6 +8,7 @@ import pydantic

  class PromptSettings(UniversalBaseModel):
  timeout: typing.Optional[float] = None
+ stream_enabled: typing.Optional[bool] = None

  if IS_PYDANTIC_V2:
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
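
`PromptSettings` gains an optional `stream_enabled` flag alongside `timeout`; both default to `None`. A minimal construction sketch, assuming `PromptSettings` is re-exported from the top-level `vellum` package like the other generated types:

```python
from vellum import PromptSettings

# Both fields are optional; leaving stream_enabled unset keeps the server-side default.
settings = PromptSettings(timeout=30.0, stream_enabled=False)
```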
vellum/client/types/workflow_event_execution_read.py CHANGED
@@ -10,8 +10,6 @@ from .workflow_deployment_parent_context import WorkflowDeploymentParentContext
  from .workflow_parent_context import WorkflowParentContext
  from .workflow_sandbox_parent_context import WorkflowSandboxParentContext
  from .array_vellum_value import ArrayVellumValue
- from .node_event_display_context import NodeEventDisplayContext
- from .workflow_event_display_context import WorkflowEventDisplayContext
  import typing
  import datetime as dt
  from .execution_vellum_value import ExecutionVellumValue
@@ -56,5 +54,3 @@ update_forward_refs(WorkflowDeploymentParentContext, WorkflowEventExecutionRead=
  update_forward_refs(WorkflowParentContext, WorkflowEventExecutionRead=WorkflowEventExecutionRead)
  update_forward_refs(WorkflowSandboxParentContext, WorkflowEventExecutionRead=WorkflowEventExecutionRead)
  update_forward_refs(ArrayVellumValue, WorkflowEventExecutionRead=WorkflowEventExecutionRead)
- update_forward_refs(NodeEventDisplayContext, WorkflowEventExecutionRead=WorkflowEventExecutionRead)
- update_forward_refs(WorkflowEventDisplayContext, WorkflowEventExecutionRead=WorkflowEventExecutionRead)