vellum-ai 0.9.6__py3-none-any.whl → 0.9.7__py3-none-any.whl

Files changed (32)
  1. vellum/__init__.py +14 -46
  2. vellum/core/client_wrapper.py +1 -1
  3. vellum/resources/ad_hoc/client.py +75 -108
  4. vellum/types/__init__.py +14 -46
  5. vellum/types/{ad_hoc_expand_meta_request.py → ad_hoc_expand_meta.py} +1 -1
  6. vellum/types/{prompt_parameters_request.py → prompt_parameters.py} +1 -1
  7. vellum/types/{prompt_request_chat_history_input_request.py → prompt_request_chat_history_input.py} +3 -3
  8. vellum/types/prompt_request_input.py +8 -0
  9. vellum/types/{prompt_request_json_input_request.py → prompt_request_json_input.py} +1 -1
  10. vellum/types/{prompt_request_string_input_request.py → prompt_request_string_input.py} +1 -1
  11. vellum/types/{prompt_settings_request.py → prompt_settings.py} +1 -1
  12. {vellum_ai-0.9.6.dist-info → vellum_ai-0.9.7.dist-info}/METADATA +1 -1
  13. {vellum_ai-0.9.6.dist-info → vellum_ai-0.9.7.dist-info}/RECORD +15 -31
  14. vellum/types/chat_message_prompt_block_properties_request.py +0 -38
  15. vellum/types/chat_message_prompt_block_request.py +0 -38
  16. vellum/types/ephemeral_prompt_cache_config_request.py +0 -20
  17. vellum/types/ephemeral_prompt_cache_config_type_enum.py +0 -5
  18. vellum/types/function_definition_prompt_block_properties_request.py +0 -42
  19. vellum/types/function_definition_prompt_block_request.py +0 -29
  20. vellum/types/jinja_prompt_block_properties_request.py +0 -21
  21. vellum/types/jinja_prompt_block_request.py +0 -29
  22. vellum/types/plain_text_prompt_block_request.py +0 -28
  23. vellum/types/prompt_block_request.py +0 -19
  24. vellum/types/prompt_block_state.py +0 -5
  25. vellum/types/prompt_request_input_request.py +0 -10
  26. vellum/types/rich_text_child_block_request.py +0 -7
  27. vellum/types/rich_text_prompt_block_request.py +0 -30
  28. vellum/types/variable_prompt_block_request.py +0 -28
  29. vellum/types/vellum_variable_extensions_request.py +0 -23
  30. vellum/types/vellum_variable_request.py +0 -33
  31. {vellum_ai-0.9.6.dist-info → vellum_ai-0.9.7.dist-info}/LICENSE +0 -0
  32. {vellum_ai-0.9.6.dist-info → vellum_ai-0.9.7.dist-info}/WHEEL +0 -0
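
In short, 0.9.7 drops the "Request" suffix from the ad-hoc prompt types (AdHocExpandMetaRequest becomes AdHocExpandMeta, PromptParametersRequest becomes PromptParameters, and so on), removes the typed prompt block models (PromptBlockRequest, JinjaPromptBlockRequest, EphemeralPromptCacheConfigRequest, and related classes), and retypes the blocks parameter as a sequence of plain JSON values. A minimal migration sketch, assuming code written against 0.9.6; the block dict is the placeholder value from the generated example below, not a documented schema:

# Migration sketch for 0.9.7 (old 0.9.6 names shown in comments).
from vellum import (
    AdHocExpandMeta,           # was AdHocExpandMetaRequest
    PromptParameters,          # was PromptParametersRequest
    PromptRequestStringInput,  # was PromptRequestStringInputRequest
    PromptSettings,            # was PromptSettingsRequest
    VellumVariable,            # was VellumVariableRequest
)

# The typed prompt block models (JinjaPromptBlockRequest and friends) are gone;
# blocks are now passed as plain JSON-serializable dicts. Placeholder value:
blocks = [{"key": "value"}]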
vellum/__init__.py CHANGED
@@ -2,7 +2,7 @@
 
 from .types import (
     AdHocExecutePromptEvent,
-    AdHocExpandMetaRequest,
+    AdHocExpandMeta,
     AdHocFulfilledPromptExecutionMeta,
     AdHocInitiatedPromptExecutionMeta,
     AdHocRejectedPromptExecutionMeta,
@@ -37,8 +37,6 @@ from .types import (
     ChatMessage,
     ChatMessageContent,
     ChatMessageContentRequest,
-    ChatMessagePromptBlockPropertiesRequest,
-    ChatMessagePromptBlockRequest,
     ChatMessageRequest,
     ChatMessageRole,
     CodeExecutionNodeArrayResult,
@@ -84,8 +82,6 @@ from .types import (
     EntityStatus,
     EntityVisibility,
     EnvironmentEnum,
-    EphemeralPromptCacheConfigRequest,
-    EphemeralPromptCacheConfigTypeEnum,
     ErrorInputRequest,
     ErrorVariableValue,
     ErrorVellumValue,
@@ -134,8 +130,6 @@ from .types import (
     FunctionCallVariableValue,
     FunctionCallVellumValue,
     FunctionCallVellumValueRequest,
-    FunctionDefinitionPromptBlockPropertiesRequest,
-    FunctionDefinitionPromptBlockRequest,
     GenerateOptionsRequest,
     GenerateRequest,
     GenerateResponse,
@@ -168,8 +162,6 @@ from .types import (
     InstructorVectorizerConfig,
     InstructorVectorizerConfigRequest,
     IterationStateEnum,
-    JinjaPromptBlockPropertiesRequest,
-    JinjaPromptBlockRequest,
     JsonInputRequest,
     JsonVariableValue,
     JsonVellumValue,
@@ -254,12 +246,9 @@ from .types import (
     PaginatedWorkflowReleaseTagReadList,
     PdfSearchResultMetaSource,
     PdfSearchResultMetaSourceRequest,
-    PlainTextPromptBlockRequest,
     Price,
     ProcessingFailureReasonEnum,
     ProcessingStateEnum,
-    PromptBlockRequest,
-    PromptBlockState,
     PromptDeploymentExpandMetaRequest,
     PromptDeploymentInputRequest,
     PromptExecutionMeta,
@@ -267,12 +256,12 @@ from .types import (
     PromptNodeResult,
     PromptNodeResultData,
     PromptOutput,
-    PromptParametersRequest,
-    PromptRequestChatHistoryInputRequest,
-    PromptRequestInputRequest,
-    PromptRequestJsonInputRequest,
-    PromptRequestStringInputRequest,
-    PromptSettingsRequest,
+    PromptParameters,
+    PromptRequestChatHistoryInput,
+    PromptRequestInput,
+    PromptRequestJsonInput,
+    PromptRequestStringInput,
+    PromptSettings,
     RawPromptExecutionOverridesRequest,
     ReductoChunkerConfig,
     ReductoChunkerConfigRequest,
@@ -286,8 +275,6 @@ from .types import (
     RejectedWorkflowNodeResultEvent,
     ReleaseTagSource,
     ReplaceTestSuiteTestCaseRequest,
-    RichTextChildBlockRequest,
-    RichTextPromptBlockRequest,
     SandboxScenario,
     ScenarioInput,
     ScenarioInputChatHistoryVariableValue,
@@ -419,7 +406,6 @@ from .types import (
     UnitEnum,
     UploadDocumentResponse,
     UpsertTestSuiteTestCaseRequest,
-    VariablePromptBlockRequest,
     VellumAudio,
     VellumAudioRequest,
     VellumError,
@@ -434,8 +420,6 @@ from .types import (
     VellumValueRequest,
     VellumVariable,
     VellumVariableExtensions,
-    VellumVariableExtensionsRequest,
-    VellumVariableRequest,
     VellumVariableType,
     WorkflowDeploymentRead,
     WorkflowEventError,
@@ -513,7 +497,7 @@ from .version import __version__
 
 __all__ = [
     "AdHocExecutePromptEvent",
-    "AdHocExpandMetaRequest",
+    "AdHocExpandMeta",
     "AdHocFulfilledPromptExecutionMeta",
     "AdHocInitiatedPromptExecutionMeta",
     "AdHocRejectedPromptExecutionMeta",
@@ -550,8 +534,6 @@ __all__ = [
     "ChatMessage",
     "ChatMessageContent",
     "ChatMessageContentRequest",
-    "ChatMessagePromptBlockPropertiesRequest",
-    "ChatMessagePromptBlockRequest",
     "ChatMessageRequest",
     "ChatMessageRole",
     "CodeExecutionNodeArrayResult",
@@ -599,8 +581,6 @@ __all__ = [
     "EntityStatus",
     "EntityVisibility",
     "EnvironmentEnum",
-    "EphemeralPromptCacheConfigRequest",
-    "EphemeralPromptCacheConfigTypeEnum",
     "ErrorInputRequest",
     "ErrorVariableValue",
     "ErrorVellumValue",
@@ -651,8 +631,6 @@ __all__ = [
     "FunctionCallVariableValue",
     "FunctionCallVellumValue",
     "FunctionCallVellumValueRequest",
-    "FunctionDefinitionPromptBlockPropertiesRequest",
-    "FunctionDefinitionPromptBlockRequest",
     "GenerateOptionsRequest",
     "GenerateRequest",
     "GenerateResponse",
@@ -686,8 +664,6 @@ __all__ = [
     "InstructorVectorizerConfigRequest",
     "InternalServerError",
     "IterationStateEnum",
-    "JinjaPromptBlockPropertiesRequest",
-    "JinjaPromptBlockRequest",
     "JsonInputRequest",
     "JsonVariableValue",
     "JsonVellumValue",
@@ -775,12 +751,9 @@ __all__ = [
     "PaginatedWorkflowReleaseTagReadList",
     "PdfSearchResultMetaSource",
     "PdfSearchResultMetaSourceRequest",
-    "PlainTextPromptBlockRequest",
     "Price",
     "ProcessingFailureReasonEnum",
     "ProcessingStateEnum",
-    "PromptBlockRequest",
-    "PromptBlockState",
     "PromptDeploymentExpandMetaRequest",
     "PromptDeploymentInputRequest",
     "PromptExecutionMeta",
@@ -788,12 +761,12 @@ __all__ = [
     "PromptNodeResult",
     "PromptNodeResultData",
     "PromptOutput",
-    "PromptParametersRequest",
-    "PromptRequestChatHistoryInputRequest",
-    "PromptRequestInputRequest",
-    "PromptRequestJsonInputRequest",
-    "PromptRequestStringInputRequest",
-    "PromptSettingsRequest",
+    "PromptParameters",
+    "PromptRequestChatHistoryInput",
+    "PromptRequestInput",
+    "PromptRequestJsonInput",
+    "PromptRequestStringInput",
+    "PromptSettings",
     "RawPromptExecutionOverridesRequest",
     "ReductoChunkerConfig",
     "ReductoChunkerConfigRequest",
@@ -807,8 +780,6 @@ __all__ = [
     "RejectedWorkflowNodeResultEvent",
     "ReleaseTagSource",
     "ReplaceTestSuiteTestCaseRequest",
-    "RichTextChildBlockRequest",
-    "RichTextPromptBlockRequest",
     "SandboxScenario",
     "ScenarioInput",
     "ScenarioInputChatHistoryVariableValue",
@@ -940,7 +911,6 @@ __all__ = [
     "UnitEnum",
     "UploadDocumentResponse",
     "UpsertTestSuiteTestCaseRequest",
-    "VariablePromptBlockRequest",
     "Vellum",
     "VellumAudio",
     "VellumAudioRequest",
@@ -957,8 +927,6 @@ __all__ = [
     "VellumValueRequest",
     "VellumVariable",
     "VellumVariableExtensions",
-    "VellumVariableExtensionsRequest",
-    "VellumVariableRequest",
     "VellumVariableType",
     "WorkflowDeploymentRead",
     "WorkflowDeploymentsListRequestStatus",
vellum/core/client_wrapper.py CHANGED
@@ -17,7 +17,7 @@ class BaseClientWrapper:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "vellum-ai",
-            "X-Fern-SDK-Version": "0.9.6",
+            "X-Fern-SDK-Version": "0.9.7",
         }
         headers["X_API_KEY"] = self.api_key
         return headers
vellum/resources/ad_hoc/client.py CHANGED
@@ -2,12 +2,11 @@
 
 import typing
 from ...core.client_wrapper import SyncClientWrapper
-from ...types.prompt_request_input_request import PromptRequestInputRequest
-from ...types.vellum_variable_request import VellumVariableRequest
-from ...types.prompt_parameters_request import PromptParametersRequest
-from ...types.prompt_block_request import PromptBlockRequest
-from ...types.prompt_settings_request import PromptSettingsRequest
-from ...types.ad_hoc_expand_meta_request import AdHocExpandMetaRequest
+from ...types.prompt_request_input import PromptRequestInput
+from ...types.vellum_variable import VellumVariable
+from ...types.prompt_parameters import PromptParameters
+from ...types.prompt_settings import PromptSettings
+from ...types.ad_hoc_expand_meta import AdHocExpandMeta
 from ...core.request_options import RequestOptions
 from ...types.ad_hoc_execute_prompt_event import AdHocExecutePromptEvent
 from ...core.serialization import convert_and_respect_annotation_metadata
@@ -32,12 +31,12 @@ class AdHocClient:
         self,
         *,
         ml_model: str,
-        input_values: typing.Sequence[PromptRequestInputRequest],
-        input_variables: typing.Sequence[VellumVariableRequest],
-        parameters: PromptParametersRequest,
-        blocks: typing.Sequence[PromptBlockRequest],
-        settings: typing.Optional[PromptSettingsRequest] = OMIT,
-        expand_meta: typing.Optional[AdHocExpandMetaRequest] = OMIT,
+        input_values: typing.Sequence[PromptRequestInput],
+        input_variables: typing.Sequence[VellumVariable],
+        parameters: PromptParameters,
+        blocks: typing.Sequence[typing.Optional[typing.Any]],
+        settings: typing.Optional[PromptSettings] = OMIT,
+        expand_meta: typing.Optional[AdHocExpandMeta] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> typing.Iterator[AdHocExecutePromptEvent]:
         """
@@ -47,17 +46,17 @@
         ----------
         ml_model : str
 
-        input_values : typing.Sequence[PromptRequestInputRequest]
+        input_values : typing.Sequence[PromptRequestInput]
 
-        input_variables : typing.Sequence[VellumVariableRequest]
+        input_variables : typing.Sequence[VellumVariable]
 
-        parameters : PromptParametersRequest
+        parameters : PromptParameters
 
-        blocks : typing.Sequence[PromptBlockRequest]
+        blocks : typing.Sequence[typing.Optional[typing.Any]]
 
-        settings : typing.Optional[PromptSettingsRequest]
+        settings : typing.Optional[PromptSettings]
 
-        expand_meta : typing.Optional[AdHocExpandMetaRequest]
+        expand_meta : typing.Optional[AdHocExpandMeta]
 
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
@@ -70,17 +69,14 @@
         Examples
         --------
         from vellum import (
-            AdHocExpandMetaRequest,
-            EphemeralPromptCacheConfigRequest,
-            JinjaPromptBlockPropertiesRequest,
-            JinjaPromptBlockRequest,
-            PromptParametersRequest,
-            PromptRequestStringInputRequest,
-            PromptSettingsRequest,
-            StringVellumValueRequest,
+            AdHocExpandMeta,
+            PromptParameters,
+            PromptRequestStringInput,
+            PromptSettings,
+            StringVellumValue,
             Vellum,
-            VellumVariableExtensionsRequest,
-            VellumVariableRequest,
+            VellumVariable,
+            VellumVariableExtensions,
         )
 
         client = Vellum(
@@ -89,26 +85,26 @@
         response = client.ad_hoc.adhoc_execute_prompt_stream(
             ml_model="string",
             input_values=[
-                PromptRequestStringInputRequest(
+                PromptRequestStringInput(
                     key="string",
                     value="string",
                 )
             ],
             input_variables=[
-                VellumVariableRequest(
+                VellumVariable(
                     id="string",
                     key="string",
                     type="STRING",
                     required=True,
-                    default=StringVellumValueRequest(
-                        value="string",
+                    default=StringVellumValue(
+                        value={"key": "value"},
                     ),
-                    extensions=VellumVariableExtensionsRequest(
-                        color="string",
+                    extensions=VellumVariableExtensions(
+                        color={"key": "value"},
                     ),
                 )
             ],
-            parameters=PromptParametersRequest(
+            parameters=PromptParameters(
                 stop=["string"],
                 temperature=1.1,
                 max_tokens=1,
@@ -119,22 +115,11 @@
                 logit_bias={"string": {"key": "value"}},
                 custom_parameters={"string": {"key": "value"}},
             ),
-            settings=PromptSettingsRequest(
+            settings=PromptSettings(
                 timeout=1.1,
             ),
-            blocks=[
-                JinjaPromptBlockRequest(
-                    state="ENABLED",
-                    cache_config=EphemeralPromptCacheConfigRequest(
-                        type={"key": "value"},
-                    ),
-                    properties=JinjaPromptBlockPropertiesRequest(
-                        template="string",
-                        template_type="STRING",
-                    ),
-                )
-            ],
-            expand_meta=AdHocExpandMetaRequest(
+            blocks=[{"key": "value"}],
+            expand_meta=AdHocExpandMeta(
                 cost=True,
                 model_name=True,
                 usage=True,
@@ -151,22 +136,20 @@
             json={
                 "ml_model": ml_model,
                 "input_values": convert_and_respect_annotation_metadata(
-                    object_=input_values, annotation=typing.Sequence[PromptRequestInputRequest], direction="write"
+                    object_=input_values, annotation=typing.Sequence[PromptRequestInput], direction="write"
                 ),
                 "input_variables": convert_and_respect_annotation_metadata(
-                    object_=input_variables, annotation=typing.Sequence[VellumVariableRequest], direction="write"
+                    object_=input_variables, annotation=typing.Sequence[VellumVariable], direction="write"
                 ),
                 "parameters": convert_and_respect_annotation_metadata(
-                    object_=parameters, annotation=PromptParametersRequest, direction="write"
+                    object_=parameters, annotation=PromptParameters, direction="write"
                 ),
                 "settings": convert_and_respect_annotation_metadata(
-                    object_=settings, annotation=PromptSettingsRequest, direction="write"
-                ),
-                "blocks": convert_and_respect_annotation_metadata(
-                    object_=blocks, annotation=typing.Sequence[PromptBlockRequest], direction="write"
+                    object_=settings, annotation=PromptSettings, direction="write"
                 ),
+                "blocks": blocks,
                 "expand_meta": convert_and_respect_annotation_metadata(
-                    object_=expand_meta, annotation=AdHocExpandMetaRequest, direction="write"
+                    object_=expand_meta, annotation=AdHocExpandMeta, direction="write"
                 ),
             },
             request_options=request_options,
@@ -233,12 +216,12 @@ class AsyncAdHocClient:
         self,
         *,
         ml_model: str,
-        input_values: typing.Sequence[PromptRequestInputRequest],
-        input_variables: typing.Sequence[VellumVariableRequest],
-        parameters: PromptParametersRequest,
-        blocks: typing.Sequence[PromptBlockRequest],
-        settings: typing.Optional[PromptSettingsRequest] = OMIT,
-        expand_meta: typing.Optional[AdHocExpandMetaRequest] = OMIT,
+        input_values: typing.Sequence[PromptRequestInput],
+        input_variables: typing.Sequence[VellumVariable],
+        parameters: PromptParameters,
+        blocks: typing.Sequence[typing.Optional[typing.Any]],
+        settings: typing.Optional[PromptSettings] = OMIT,
+        expand_meta: typing.Optional[AdHocExpandMeta] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> typing.AsyncIterator[AdHocExecutePromptEvent]:
         """
@@ -248,17 +231,17 @@
         ----------
         ml_model : str
 
-        input_values : typing.Sequence[PromptRequestInputRequest]
+        input_values : typing.Sequence[PromptRequestInput]
 
-        input_variables : typing.Sequence[VellumVariableRequest]
+        input_variables : typing.Sequence[VellumVariable]
 
-        parameters : PromptParametersRequest
+        parameters : PromptParameters
 
-        blocks : typing.Sequence[PromptBlockRequest]
+        blocks : typing.Sequence[typing.Optional[typing.Any]]
 
-        settings : typing.Optional[PromptSettingsRequest]
+        settings : typing.Optional[PromptSettings]
 
-        expand_meta : typing.Optional[AdHocExpandMetaRequest]
+        expand_meta : typing.Optional[AdHocExpandMeta]
 
         request_options : typing.Optional[RequestOptions]
            Request-specific configuration.
@@ -273,17 +256,14 @@
         import asyncio
 
         from vellum import (
-            AdHocExpandMetaRequest,
+            AdHocExpandMeta,
             AsyncVellum,
-            EphemeralPromptCacheConfigRequest,
-            JinjaPromptBlockPropertiesRequest,
-            JinjaPromptBlockRequest,
-            PromptParametersRequest,
-            PromptRequestStringInputRequest,
-            PromptSettingsRequest,
-            StringVellumValueRequest,
-            VellumVariableExtensionsRequest,
-            VellumVariableRequest,
+            PromptParameters,
+            PromptRequestStringInput,
+            PromptSettings,
+            StringVellumValue,
+            VellumVariable,
+            VellumVariableExtensions,
         )
 
         client = AsyncVellum(
@@ -295,26 +275,26 @@ class AsyncAdHocClient:
         response = await client.ad_hoc.adhoc_execute_prompt_stream(
             ml_model="string",
             input_values=[
-                PromptRequestStringInputRequest(
+                PromptRequestStringInput(
                     key="string",
                     value="string",
                 )
             ],
             input_variables=[
-                VellumVariableRequest(
+                VellumVariable(
                     id="string",
                     key="string",
                     type="STRING",
                     required=True,
-                    default=StringVellumValueRequest(
-                        value="string",
+                    default=StringVellumValue(
+                        value={"key": "value"},
                     ),
-                    extensions=VellumVariableExtensionsRequest(
-                        color="string",
+                    extensions=VellumVariableExtensions(
+                        color={"key": "value"},
                     ),
                 )
             ],
-            parameters=PromptParametersRequest(
+            parameters=PromptParameters(
                 stop=["string"],
                 temperature=1.1,
                 max_tokens=1,
@@ -325,22 +305,11 @@
                 logit_bias={"string": {"key": "value"}},
                 custom_parameters={"string": {"key": "value"}},
             ),
-            settings=PromptSettingsRequest(
+            settings=PromptSettings(
                 timeout=1.1,
            ),
-            blocks=[
-                JinjaPromptBlockRequest(
-                    state="ENABLED",
-                    cache_config=EphemeralPromptCacheConfigRequest(
-                        type={"key": "value"},
-                    ),
-                    properties=JinjaPromptBlockPropertiesRequest(
-                        template="string",
-                        template_type="STRING",
-                    ),
-                )
-            ],
-            expand_meta=AdHocExpandMetaRequest(
+            blocks=[{"key": "value"}],
+            expand_meta=AdHocExpandMeta(
                 cost=True,
                 model_name=True,
                 usage=True,
@@ -360,22 +329,20 @@
             json={
                 "ml_model": ml_model,
                 "input_values": convert_and_respect_annotation_metadata(
-                    object_=input_values, annotation=typing.Sequence[PromptRequestInputRequest], direction="write"
+                    object_=input_values, annotation=typing.Sequence[PromptRequestInput], direction="write"
                ),
                 "input_variables": convert_and_respect_annotation_metadata(
-                    object_=input_variables, annotation=typing.Sequence[VellumVariableRequest], direction="write"
+                    object_=input_variables, annotation=typing.Sequence[VellumVariable], direction="write"
                 ),
                 "parameters": convert_and_respect_annotation_metadata(
-                    object_=parameters, annotation=PromptParametersRequest, direction="write"
+                    object_=parameters, annotation=PromptParameters, direction="write"
                 ),
                 "settings": convert_and_respect_annotation_metadata(
-                    object_=settings, annotation=PromptSettingsRequest, direction="write"
-                ),
-                "blocks": convert_and_respect_annotation_metadata(
-                    object_=blocks, annotation=typing.Sequence[PromptBlockRequest], direction="write"
+                    object_=settings, annotation=PromptSettings, direction="write"
                 ),
+                "blocks": blocks,
                 "expand_meta": convert_and_respect_annotation_metadata(
-                    object_=expand_meta, annotation=AdHocExpandMetaRequest, direction="write"
+                    object_=expand_meta, annotation=AdHocExpandMeta, direction="write"
                 ),
             },
             request_options=request_options,
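
Putting the renames together, a condensed streaming call against 0.9.7. This is a sketch that mirrors the generated docstring example above: the API key, model name, and block dict are placeholders, the empty input_variables list is an assumption made for brevity, and the unset PromptParameters fields are assumed optional.

from vellum import PromptParameters, PromptRequestStringInput, Vellum

client = Vellum(api_key="YOUR_API_KEY")  # placeholder key

# adhoc_execute_prompt_stream yields AdHocExecutePromptEvent objects.
for event in client.ad_hoc.adhoc_execute_prompt_stream(
    ml_model="string",  # placeholder model identifier, as in the example above
    input_values=[PromptRequestStringInput(key="string", value="string")],
    input_variables=[],  # assumed empty for brevity; see VellumVariable above
    parameters=PromptParameters(temperature=1.1, max_tokens=1),
    blocks=[{"key": "value"}],  # raw JSON now that typed block models are gone
):
    print(event)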