vellum-ai 0.9.5__py3-none-any.whl → 0.9.7__py3-none-any.whl

Files changed (32)
  1. vellum/__init__.py +14 -46
  2. vellum/core/client_wrapper.py +1 -1
  3. vellum/resources/ad_hoc/client.py +75 -110
  4. vellum/types/__init__.py +14 -46
  5. vellum/types/{ad_hoc_expand_meta_request.py → ad_hoc_expand_meta.py} +1 -1
  6. vellum/types/{prompt_parameters_request.py → prompt_parameters.py} +1 -1
  7. vellum/types/{prompt_request_chat_history_input_request.py → prompt_request_chat_history_input.py} +3 -3
  8. vellum/types/prompt_request_input.py +8 -0
  9. vellum/types/{prompt_request_json_input_request.py → prompt_request_json_input.py} +1 -1
  10. vellum/types/{prompt_request_string_input_request.py → prompt_request_string_input.py} +1 -1
  11. vellum/types/{prompt_settings_request.py → prompt_settings.py} +1 -1
  12. {vellum_ai-0.9.5.dist-info → vellum_ai-0.9.7.dist-info}/METADATA +1 -1
  13. {vellum_ai-0.9.5.dist-info → vellum_ai-0.9.7.dist-info}/RECORD +15 -31
  14. vellum/types/chat_message_prompt_block_properties_request.py +0 -38
  15. vellum/types/chat_message_prompt_block_request.py +0 -39
  16. vellum/types/ephemeral_prompt_cache_config_request.py +0 -20
  17. vellum/types/ephemeral_prompt_cache_config_type_enum.py +0 -5
  18. vellum/types/function_definition_prompt_block_properties_request.py +0 -42
  19. vellum/types/function_definition_prompt_block_request.py +0 -30
  20. vellum/types/jinja_prompt_block_properties_request.py +0 -21
  21. vellum/types/jinja_prompt_block_request.py +0 -30
  22. vellum/types/plain_text_prompt_block_request.py +0 -29
  23. vellum/types/prompt_block_request.py +0 -19
  24. vellum/types/prompt_block_state.py +0 -5
  25. vellum/types/prompt_request_input_request.py +0 -10
  26. vellum/types/rich_text_child_block_request.py +0 -7
  27. vellum/types/rich_text_prompt_block_request.py +0 -30
  28. vellum/types/variable_prompt_block_request.py +0 -29
  29. vellum/types/vellum_variable_extensions_request.py +0 -23
  30. vellum/types/vellum_variable_request.py +0 -33
  31. {vellum_ai-0.9.5.dist-info → vellum_ai-0.9.7.dist-info}/LICENSE +0 -0
  32. {vellum_ai-0.9.5.dist-info → vellum_ai-0.9.7.dist-info}/WHEEL +0 -0
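
In short: 0.9.7 drops the Request suffix from the ad-hoc prompt types (AdHocExpandMetaRequest becomes AdHocExpandMeta, PromptParametersRequest becomes PromptParameters, and so on), deletes the typed prompt-block models (JinjaPromptBlockRequest, ChatMessagePromptBlockRequest, etc.), and loosens the blocks parameter to arbitrary JSON. A minimal migration sketch against the new surface, condensed from the docstring examples in the hunks below: the api_key value is a placeholder, the block dict is illustrative rather than a documented schema, and fields omitted from the models are assumed optional.

# 0.9.5 -> 0.9.7: the *Request models are gone; import the unsuffixed names instead.
from vellum import (
    AdHocExpandMeta,            # was AdHocExpandMetaRequest
    PromptParameters,           # was PromptParametersRequest
    PromptRequestStringInput,   # was PromptRequestStringInputRequest
    PromptSettings,             # was PromptSettingsRequest
    Vellum,
    VellumVariable,             # was VellumVariableRequest
)

client = Vellum(api_key="YOUR_API_KEY")  # placeholder credential

response = client.ad_hoc.adhoc_execute_prompt_stream(
    ml_model="string",
    input_values=[PromptRequestStringInput(key="string", value="string")],
    input_variables=[VellumVariable(id="string", key="string", type="STRING")],
    parameters=PromptParameters(temperature=1.1, max_tokens=1),
    settings=PromptSettings(timeout=1.1),
    # blocks is now typing.Sequence[typing.Optional[typing.Any]]: raw dicts replace
    # the removed JinjaPromptBlockRequest / PromptBlockRequest models.
    blocks=[{"key": "value"}],
    expand_meta=AdHocExpandMeta(cost=True, model_name=True, usage=True),
)
for event in response:  # streams AdHocExecutePromptEvent objects
    print(event)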
vellum/__init__.py CHANGED
@@ -2,7 +2,7 @@
 
 from .types import (
     AdHocExecutePromptEvent,
-    AdHocExpandMetaRequest,
+    AdHocExpandMeta,
     AdHocFulfilledPromptExecutionMeta,
     AdHocInitiatedPromptExecutionMeta,
     AdHocRejectedPromptExecutionMeta,
@@ -37,8 +37,6 @@ from .types import (
     ChatMessage,
     ChatMessageContent,
     ChatMessageContentRequest,
-    ChatMessagePromptBlockPropertiesRequest,
-    ChatMessagePromptBlockRequest,
     ChatMessageRequest,
     ChatMessageRole,
     CodeExecutionNodeArrayResult,
@@ -84,8 +82,6 @@ from .types import (
     EntityStatus,
     EntityVisibility,
     EnvironmentEnum,
-    EphemeralPromptCacheConfigRequest,
-    EphemeralPromptCacheConfigTypeEnum,
     ErrorInputRequest,
     ErrorVariableValue,
     ErrorVellumValue,
@@ -134,8 +130,6 @@ from .types import (
     FunctionCallVariableValue,
     FunctionCallVellumValue,
     FunctionCallVellumValueRequest,
-    FunctionDefinitionPromptBlockPropertiesRequest,
-    FunctionDefinitionPromptBlockRequest,
     GenerateOptionsRequest,
     GenerateRequest,
     GenerateResponse,
@@ -168,8 +162,6 @@ from .types import (
     InstructorVectorizerConfig,
     InstructorVectorizerConfigRequest,
     IterationStateEnum,
-    JinjaPromptBlockPropertiesRequest,
-    JinjaPromptBlockRequest,
     JsonInputRequest,
     JsonVariableValue,
     JsonVellumValue,
@@ -254,12 +246,9 @@ from .types import (
     PaginatedWorkflowReleaseTagReadList,
     PdfSearchResultMetaSource,
     PdfSearchResultMetaSourceRequest,
-    PlainTextPromptBlockRequest,
     Price,
     ProcessingFailureReasonEnum,
     ProcessingStateEnum,
-    PromptBlockRequest,
-    PromptBlockState,
     PromptDeploymentExpandMetaRequest,
     PromptDeploymentInputRequest,
     PromptExecutionMeta,
@@ -267,12 +256,12 @@ from .types import (
     PromptNodeResult,
     PromptNodeResultData,
     PromptOutput,
-    PromptParametersRequest,
-    PromptRequestChatHistoryInputRequest,
-    PromptRequestInputRequest,
-    PromptRequestJsonInputRequest,
-    PromptRequestStringInputRequest,
-    PromptSettingsRequest,
+    PromptParameters,
+    PromptRequestChatHistoryInput,
+    PromptRequestInput,
+    PromptRequestJsonInput,
+    PromptRequestStringInput,
+    PromptSettings,
     RawPromptExecutionOverridesRequest,
     ReductoChunkerConfig,
     ReductoChunkerConfigRequest,
@@ -286,8 +275,6 @@ from .types import (
     RejectedWorkflowNodeResultEvent,
     ReleaseTagSource,
     ReplaceTestSuiteTestCaseRequest,
-    RichTextChildBlockRequest,
-    RichTextPromptBlockRequest,
     SandboxScenario,
     ScenarioInput,
     ScenarioInputChatHistoryVariableValue,
@@ -419,7 +406,6 @@ from .types import (
     UnitEnum,
     UploadDocumentResponse,
     UpsertTestSuiteTestCaseRequest,
-    VariablePromptBlockRequest,
     VellumAudio,
     VellumAudioRequest,
     VellumError,
@@ -434,8 +420,6 @@ from .types import (
     VellumValueRequest,
     VellumVariable,
     VellumVariableExtensions,
-    VellumVariableExtensionsRequest,
-    VellumVariableRequest,
     VellumVariableType,
     WorkflowDeploymentRead,
     WorkflowEventError,
@@ -513,7 +497,7 @@ from .version import __version__
 
 __all__ = [
     "AdHocExecutePromptEvent",
-    "AdHocExpandMetaRequest",
+    "AdHocExpandMeta",
     "AdHocFulfilledPromptExecutionMeta",
    "AdHocInitiatedPromptExecutionMeta",
     "AdHocRejectedPromptExecutionMeta",
@@ -550,8 +534,6 @@ __all__ = [
     "ChatMessage",
     "ChatMessageContent",
     "ChatMessageContentRequest",
-    "ChatMessagePromptBlockPropertiesRequest",
-    "ChatMessagePromptBlockRequest",
     "ChatMessageRequest",
     "ChatMessageRole",
     "CodeExecutionNodeArrayResult",
@@ -599,8 +581,6 @@ __all__ = [
     "EntityStatus",
     "EntityVisibility",
     "EnvironmentEnum",
-    "EphemeralPromptCacheConfigRequest",
-    "EphemeralPromptCacheConfigTypeEnum",
     "ErrorInputRequest",
     "ErrorVariableValue",
     "ErrorVellumValue",
@@ -651,8 +631,6 @@ __all__ = [
     "FunctionCallVariableValue",
     "FunctionCallVellumValue",
     "FunctionCallVellumValueRequest",
-    "FunctionDefinitionPromptBlockPropertiesRequest",
-    "FunctionDefinitionPromptBlockRequest",
     "GenerateOptionsRequest",
     "GenerateRequest",
     "GenerateResponse",
@@ -686,8 +664,6 @@ __all__ = [
     "InstructorVectorizerConfigRequest",
     "InternalServerError",
     "IterationStateEnum",
-    "JinjaPromptBlockPropertiesRequest",
-    "JinjaPromptBlockRequest",
     "JsonInputRequest",
     "JsonVariableValue",
     "JsonVellumValue",
@@ -775,12 +751,9 @@ __all__ = [
     "PaginatedWorkflowReleaseTagReadList",
     "PdfSearchResultMetaSource",
     "PdfSearchResultMetaSourceRequest",
-    "PlainTextPromptBlockRequest",
     "Price",
     "ProcessingFailureReasonEnum",
     "ProcessingStateEnum",
-    "PromptBlockRequest",
-    "PromptBlockState",
     "PromptDeploymentExpandMetaRequest",
     "PromptDeploymentInputRequest",
     "PromptExecutionMeta",
@@ -788,12 +761,12 @@ __all__ = [
     "PromptNodeResult",
     "PromptNodeResultData",
     "PromptOutput",
-    "PromptParametersRequest",
-    "PromptRequestChatHistoryInputRequest",
-    "PromptRequestInputRequest",
-    "PromptRequestJsonInputRequest",
-    "PromptRequestStringInputRequest",
-    "PromptSettingsRequest",
+    "PromptParameters",
+    "PromptRequestChatHistoryInput",
+    "PromptRequestInput",
+    "PromptRequestJsonInput",
+    "PromptRequestStringInput",
+    "PromptSettings",
     "RawPromptExecutionOverridesRequest",
     "ReductoChunkerConfig",
     "ReductoChunkerConfigRequest",
@@ -807,8 +780,6 @@ __all__ = [
     "RejectedWorkflowNodeResultEvent",
     "ReleaseTagSource",
     "ReplaceTestSuiteTestCaseRequest",
-    "RichTextChildBlockRequest",
-    "RichTextPromptBlockRequest",
     "SandboxScenario",
     "ScenarioInput",
     "ScenarioInputChatHistoryVariableValue",
@@ -940,7 +911,6 @@ __all__ = [
     "UnitEnum",
     "UploadDocumentResponse",
     "UpsertTestSuiteTestCaseRequest",
-    "VariablePromptBlockRequest",
     "Vellum",
     "VellumAudio",
     "VellumAudioRequest",
@@ -957,8 +927,6 @@ __all__ = [
     "VellumValueRequest",
     "VellumVariable",
     "VellumVariableExtensions",
-    "VellumVariableExtensionsRequest",
-    "VellumVariableRequest",
     "VellumVariableType",
     "WorkflowDeploymentRead",
     "WorkflowDeploymentsListRequestStatus",
vellum/core/client_wrapper.py CHANGED
@@ -17,7 +17,7 @@ class BaseClientWrapper:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "vellum-ai",
-            "X-Fern-SDK-Version": "0.9.5",
+            "X-Fern-SDK-Version": "0.9.7",
         }
         headers["X_API_KEY"] = self.api_key
         return headers
vellum/resources/ad_hoc/client.py CHANGED
@@ -2,12 +2,11 @@
 
 import typing
 from ...core.client_wrapper import SyncClientWrapper
-from ...types.prompt_request_input_request import PromptRequestInputRequest
-from ...types.vellum_variable_request import VellumVariableRequest
-from ...types.prompt_parameters_request import PromptParametersRequest
-from ...types.prompt_block_request import PromptBlockRequest
-from ...types.prompt_settings_request import PromptSettingsRequest
-from ...types.ad_hoc_expand_meta_request import AdHocExpandMetaRequest
+from ...types.prompt_request_input import PromptRequestInput
+from ...types.vellum_variable import VellumVariable
+from ...types.prompt_parameters import PromptParameters
+from ...types.prompt_settings import PromptSettings
+from ...types.ad_hoc_expand_meta import AdHocExpandMeta
 from ...core.request_options import RequestOptions
 from ...types.ad_hoc_execute_prompt_event import AdHocExecutePromptEvent
 from ...core.serialization import convert_and_respect_annotation_metadata
@@ -32,12 +31,12 @@ class AdHocClient:
         self,
         *,
         ml_model: str,
-        input_values: typing.Sequence[PromptRequestInputRequest],
-        input_variables: typing.Sequence[VellumVariableRequest],
-        parameters: PromptParametersRequest,
-        blocks: typing.Sequence[PromptBlockRequest],
-        settings: typing.Optional[PromptSettingsRequest] = OMIT,
-        expand_meta: typing.Optional[AdHocExpandMetaRequest] = OMIT,
+        input_values: typing.Sequence[PromptRequestInput],
+        input_variables: typing.Sequence[VellumVariable],
+        parameters: PromptParameters,
+        blocks: typing.Sequence[typing.Optional[typing.Any]],
+        settings: typing.Optional[PromptSettings] = OMIT,
+        expand_meta: typing.Optional[AdHocExpandMeta] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> typing.Iterator[AdHocExecutePromptEvent]:
         """
@@ -47,17 +46,17 @@ class AdHocClient:
         ----------
         ml_model : str
 
-        input_values : typing.Sequence[PromptRequestInputRequest]
+        input_values : typing.Sequence[PromptRequestInput]
 
-        input_variables : typing.Sequence[VellumVariableRequest]
+        input_variables : typing.Sequence[VellumVariable]
 
-        parameters : PromptParametersRequest
+        parameters : PromptParameters
 
-        blocks : typing.Sequence[PromptBlockRequest]
+        blocks : typing.Sequence[typing.Optional[typing.Any]]
 
-        settings : typing.Optional[PromptSettingsRequest]
+        settings : typing.Optional[PromptSettings]
 
-        expand_meta : typing.Optional[AdHocExpandMetaRequest]
+        expand_meta : typing.Optional[AdHocExpandMeta]
 
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
@@ -70,17 +69,14 @@ class AdHocClient:
         Examples
         --------
         from vellum import (
-            AdHocExpandMetaRequest,
-            EphemeralPromptCacheConfigRequest,
-            JinjaPromptBlockPropertiesRequest,
-            JinjaPromptBlockRequest,
-            PromptParametersRequest,
-            PromptRequestStringInputRequest,
-            PromptSettingsRequest,
-            StringVellumValueRequest,
+            AdHocExpandMeta,
+            PromptParameters,
+            PromptRequestStringInput,
+            PromptSettings,
+            StringVellumValue,
             Vellum,
-            VellumVariableExtensionsRequest,
-            VellumVariableRequest,
+            VellumVariable,
+            VellumVariableExtensions,
         )
 
         client = Vellum(
@@ -89,26 +85,26 @@ class AdHocClient:
         response = client.ad_hoc.adhoc_execute_prompt_stream(
             ml_model="string",
             input_values=[
-                PromptRequestStringInputRequest(
+                PromptRequestStringInput(
                     key="string",
                     value="string",
                 )
             ],
             input_variables=[
-                VellumVariableRequest(
+                VellumVariable(
                     id="string",
                     key="string",
                     type="STRING",
                     required=True,
-                    default=StringVellumValueRequest(
-                        value="string",
+                    default=StringVellumValue(
+                        value={"key": "value"},
                     ),
-                    extensions=VellumVariableExtensionsRequest(
-                        color="string",
+                    extensions=VellumVariableExtensions(
+                        color={"key": "value"},
                     ),
                 )
             ],
-            parameters=PromptParametersRequest(
+            parameters=PromptParameters(
                 stop=["string"],
                 temperature=1.1,
                 max_tokens=1,
@@ -119,23 +115,11 @@ class AdHocClient:
                 logit_bias={"string": {"key": "value"}},
                 custom_parameters={"string": {"key": "value"}},
             ),
-            settings=PromptSettingsRequest(
+            settings=PromptSettings(
                 timeout=1.1,
             ),
-            blocks=[
-                JinjaPromptBlockRequest(
-                    properties=JinjaPromptBlockPropertiesRequest(
-                        template="string",
-                        template_type="STRING",
-                    ),
-                    id="string",
-                    state="ENABLED",
-                    cache_config=EphemeralPromptCacheConfigRequest(
-                        type={"key": "value"},
-                    ),
-                )
-            ],
-            expand_meta=AdHocExpandMetaRequest(
+            blocks=[{"key": "value"}],
+            expand_meta=AdHocExpandMeta(
                 cost=True,
                 model_name=True,
                 usage=True,
@@ -152,22 +136,20 @@ class AdHocClient:
             json={
                 "ml_model": ml_model,
                 "input_values": convert_and_respect_annotation_metadata(
-                    object_=input_values, annotation=typing.Sequence[PromptRequestInputRequest], direction="write"
+                    object_=input_values, annotation=typing.Sequence[PromptRequestInput], direction="write"
                 ),
                 "input_variables": convert_and_respect_annotation_metadata(
-                    object_=input_variables, annotation=typing.Sequence[VellumVariableRequest], direction="write"
+                    object_=input_variables, annotation=typing.Sequence[VellumVariable], direction="write"
                 ),
                 "parameters": convert_and_respect_annotation_metadata(
-                    object_=parameters, annotation=PromptParametersRequest, direction="write"
+                    object_=parameters, annotation=PromptParameters, direction="write"
                 ),
                 "settings": convert_and_respect_annotation_metadata(
-                    object_=settings, annotation=PromptSettingsRequest, direction="write"
-                ),
-                "blocks": convert_and_respect_annotation_metadata(
-                    object_=blocks, annotation=typing.Sequence[PromptBlockRequest], direction="write"
+                    object_=settings, annotation=PromptSettings, direction="write"
                 ),
+                "blocks": blocks,
                 "expand_meta": convert_and_respect_annotation_metadata(
-                    object_=expand_meta, annotation=AdHocExpandMetaRequest, direction="write"
+                    object_=expand_meta, annotation=AdHocExpandMeta, direction="write"
                 ),
             },
             request_options=request_options,
@@ -234,12 +216,12 @@ class AsyncAdHocClient:
         self,
         *,
         ml_model: str,
-        input_values: typing.Sequence[PromptRequestInputRequest],
-        input_variables: typing.Sequence[VellumVariableRequest],
-        parameters: PromptParametersRequest,
-        blocks: typing.Sequence[PromptBlockRequest],
-        settings: typing.Optional[PromptSettingsRequest] = OMIT,
-        expand_meta: typing.Optional[AdHocExpandMetaRequest] = OMIT,
+        input_values: typing.Sequence[PromptRequestInput],
+        input_variables: typing.Sequence[VellumVariable],
+        parameters: PromptParameters,
+        blocks: typing.Sequence[typing.Optional[typing.Any]],
+        settings: typing.Optional[PromptSettings] = OMIT,
+        expand_meta: typing.Optional[AdHocExpandMeta] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> typing.AsyncIterator[AdHocExecutePromptEvent]:
         """
@@ -249,17 +231,17 @@ class AsyncAdHocClient:
         ----------
         ml_model : str
 
-        input_values : typing.Sequence[PromptRequestInputRequest]
+        input_values : typing.Sequence[PromptRequestInput]
 
-        input_variables : typing.Sequence[VellumVariableRequest]
+        input_variables : typing.Sequence[VellumVariable]
 
-        parameters : PromptParametersRequest
+        parameters : PromptParameters
 
-        blocks : typing.Sequence[PromptBlockRequest]
+        blocks : typing.Sequence[typing.Optional[typing.Any]]
 
-        settings : typing.Optional[PromptSettingsRequest]
+        settings : typing.Optional[PromptSettings]
 
-        expand_meta : typing.Optional[AdHocExpandMetaRequest]
+        expand_meta : typing.Optional[AdHocExpandMeta]
 
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
@@ -274,17 +256,14 @@ class AsyncAdHocClient:
         import asyncio
 
         from vellum import (
-            AdHocExpandMetaRequest,
+            AdHocExpandMeta,
             AsyncVellum,
-            EphemeralPromptCacheConfigRequest,
-            JinjaPromptBlockPropertiesRequest,
-            JinjaPromptBlockRequest,
-            PromptParametersRequest,
-            PromptRequestStringInputRequest,
-            PromptSettingsRequest,
-            StringVellumValueRequest,
-            VellumVariableExtensionsRequest,
-            VellumVariableRequest,
+            PromptParameters,
+            PromptRequestStringInput,
+            PromptSettings,
+            StringVellumValue,
+            VellumVariable,
+            VellumVariableExtensions,
         )
 
         client = AsyncVellum(
@@ -296,26 +275,26 @@ class AsyncAdHocClient:
         response = await client.ad_hoc.adhoc_execute_prompt_stream(
             ml_model="string",
             input_values=[
-                PromptRequestStringInputRequest(
+                PromptRequestStringInput(
                     key="string",
                     value="string",
                 )
             ],
             input_variables=[
-                VellumVariableRequest(
+                VellumVariable(
                     id="string",
                     key="string",
                     type="STRING",
                     required=True,
-                    default=StringVellumValueRequest(
-                        value="string",
+                    default=StringVellumValue(
+                        value={"key": "value"},
                     ),
-                    extensions=VellumVariableExtensionsRequest(
-                        color="string",
+                    extensions=VellumVariableExtensions(
+                        color={"key": "value"},
                     ),
                 )
             ],
-            parameters=PromptParametersRequest(
+            parameters=PromptParameters(
                 stop=["string"],
                 temperature=1.1,
                 max_tokens=1,
@@ -326,23 +305,11 @@ class AsyncAdHocClient:
                 logit_bias={"string": {"key": "value"}},
                 custom_parameters={"string": {"key": "value"}},
             ),
-            settings=PromptSettingsRequest(
+            settings=PromptSettings(
                 timeout=1.1,
             ),
-            blocks=[
-                JinjaPromptBlockRequest(
-                    properties=JinjaPromptBlockPropertiesRequest(
-                        template="string",
-                        template_type="STRING",
-                    ),
-                    id="string",
-                    state="ENABLED",
-                    cache_config=EphemeralPromptCacheConfigRequest(
-                        type={"key": "value"},
-                    ),
-                )
-            ],
-            expand_meta=AdHocExpandMetaRequest(
+            blocks=[{"key": "value"}],
+            expand_meta=AdHocExpandMeta(
                 cost=True,
                 model_name=True,
                 usage=True,
@@ -362,22 +329,20 @@ class AsyncAdHocClient:
             json={
                 "ml_model": ml_model,
                 "input_values": convert_and_respect_annotation_metadata(
-                    object_=input_values, annotation=typing.Sequence[PromptRequestInputRequest], direction="write"
+                    object_=input_values, annotation=typing.Sequence[PromptRequestInput], direction="write"
                ),
                 "input_variables": convert_and_respect_annotation_metadata(
-                    object_=input_variables, annotation=typing.Sequence[VellumVariableRequest], direction="write"
+                    object_=input_variables, annotation=typing.Sequence[VellumVariable], direction="write"
                 ),
                 "parameters": convert_and_respect_annotation_metadata(
-                    object_=parameters, annotation=PromptParametersRequest, direction="write"
+                    object_=parameters, annotation=PromptParameters, direction="write"
                 ),
                 "settings": convert_and_respect_annotation_metadata(
-                    object_=settings, annotation=PromptSettingsRequest, direction="write"
-                ),
-                "blocks": convert_and_respect_annotation_metadata(
-                    object_=blocks, annotation=typing.Sequence[PromptBlockRequest], direction="write"
+                    object_=settings, annotation=PromptSettings, direction="write"
                 ),
+                "blocks": blocks,
                 "expand_meta": convert_and_respect_annotation_metadata(
-                    object_=expand_meta, annotation=AdHocExpandMetaRequest, direction="write"
+                    object_=expand_meta, annotation=AdHocExpandMeta, direction="write"
                 ),
             },
             request_options=request_options,
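
The async client mirrors the sync changes one for one. For reference, a sketch of consuming the stream under 0.9.7, condensed from the docstring example above (the api_key value is a placeholder, and fields omitted from the models are assumed optional):

import asyncio

from vellum import AsyncVellum, PromptParameters, PromptRequestStringInput, VellumVariable

client = AsyncVellum(api_key="YOUR_API_KEY")  # placeholder credential


async def main() -> None:
    # The call is awaited to obtain an AsyncIterator[AdHocExecutePromptEvent],
    # which is then consumed with `async for`.
    response = await client.ad_hoc.adhoc_execute_prompt_stream(
        ml_model="string",
        input_values=[PromptRequestStringInput(key="string", value="string")],
        input_variables=[VellumVariable(id="string", key="string", type="STRING")],
        parameters=PromptParameters(temperature=1.1, max_tokens=1),
        blocks=[{"key": "value"}],  # untyped JSON blocks, as in the hunk above
    )
    async for event in response:
        print(event)


asyncio.run(main())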