vellum_ai-0.7.6-py3-none-any.whl → vellum_ai-0.7.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vellum/__init__.py +8 -0
- vellum/core/client_wrapper.py +1 -1
- vellum/resources/ml_models/client.py +24 -20
- vellum/types/__init__.py +8 -1
- vellum/types/ml_model_exec_config.py +0 -1
- vellum/types/ml_model_exec_config_request.py +0 -1
- vellum/types/named_scenario_input_json_variable_value_request.py +34 -0
- vellum/types/named_scenario_input_request.py +27 -1
- vellum/types/scenario_input.py +25 -1
- vellum/types/scenario_input_json_variable_value.py +34 -0
- {vellum_ai-0.7.6.dist-info → vellum_ai-0.7.8.dist-info}/METADATA +1 -1
- {vellum_ai-0.7.6.dist-info → vellum_ai-0.7.8.dist-info}/RECORD +14 -12
- {vellum_ai-0.7.6.dist-info → vellum_ai-0.7.8.dist-info}/LICENSE +0 -0
- {vellum_ai-0.7.6.dist-info → vellum_ai-0.7.8.dist-info}/WHEEL +0 -0
vellum/__init__.py
CHANGED
@@ -236,8 +236,10 @@ from .types import (
     MlModelTokenizerConfig_Tiktoken,
     MlModelUsage,
     NamedScenarioInputChatHistoryVariableValueRequest,
+    NamedScenarioInputJsonVariableValueRequest,
     NamedScenarioInputRequest,
     NamedScenarioInputRequest_ChatHistory,
+    NamedScenarioInputRequest_Json,
     NamedScenarioInputRequest_String,
     NamedScenarioInputStringVariableValueRequest,
     NamedTestCaseArrayVariableValue,
@@ -395,8 +397,10 @@ from .types import (
     SandboxScenario,
     ScenarioInput,
     ScenarioInputChatHistoryVariableValue,
+    ScenarioInputJsonVariableValue,
     ScenarioInputStringVariableValue,
     ScenarioInput_ChatHistory,
+    ScenarioInput_Json,
     ScenarioInput_String,
     SearchFiltersRequest,
     SearchNodeResult,
@@ -922,8 +926,10 @@ __all__ = [
     "MlModelTokenizerConfig_Tiktoken",
     "MlModelUsage",
     "NamedScenarioInputChatHistoryVariableValueRequest",
+    "NamedScenarioInputJsonVariableValueRequest",
     "NamedScenarioInputRequest",
     "NamedScenarioInputRequest_ChatHistory",
+    "NamedScenarioInputRequest_Json",
     "NamedScenarioInputRequest_String",
     "NamedScenarioInputStringVariableValueRequest",
     "NamedTestCaseArrayVariableValue",
@@ -1082,8 +1088,10 @@ __all__ = [
     "SandboxScenario",
     "ScenarioInput",
     "ScenarioInputChatHistoryVariableValue",
+    "ScenarioInputJsonVariableValue",
     "ScenarioInputStringVariableValue",
     "ScenarioInput_ChatHistory",
+    "ScenarioInput_Json",
     "ScenarioInput_String",
     "SearchFiltersRequest",
     "SearchNodeResult",
vellum/resources/ml_models/client.py
CHANGED
@@ -85,9 +85,9 @@ class MlModelsClient:
         *,
         name: str,
         family: MlModelFamily,
+        hosted_by: HostedByEnum,
+        developed_by: MlModelDeveloper,
         exec_config: MlModelExecConfigRequest,
-        hosted_by: typing.Optional[HostedByEnum] = OMIT,
-        developed_by: typing.Optional[MlModelDeveloper] = OMIT,
         parameter_config: typing.Optional[MlModelParameterConfigRequest] = OMIT,
         display_config: typing.Optional[MlModelDisplayConfigRequest] = OMIT,
         visibility: typing.Optional[VisibilityEnum] = OMIT,
@@ -125,10 +125,7 @@ class MlModelsClient:
             * `YI` - Yi
             * `ZEPHYR` - Zephyr
 
-
-            Configuration for how to execute the ML Model.
-
-        hosted_by : typing.Optional[HostedByEnum]
+        hosted_by : HostedByEnum
             The organization hosting the ML Model.
 
             * `ANTHROPIC` - ANTHROPIC
@@ -149,7 +146,7 @@ class MlModelsClient:
             * `PYQ` - PYQ
             * `REPLICATE` - REPLICATE
 
-        developed_by :
+        developed_by : MlModelDeveloper
             The organization that developed the ML Model.
 
             * `01_AI` - 01_AI
@@ -171,6 +168,9 @@ class MlModelsClient:
             * `TII` - TII
             * `WIZARDLM` - WIZARDLM
 
+        exec_config : MlModelExecConfigRequest
+            Configuration for how to execute the ML Model.
+
         parameter_config : typing.Optional[MlModelParameterConfigRequest]
             Configuration for the ML Model's parameters.
 
@@ -204,6 +204,8 @@ class MlModelsClient:
         client.ml_models.create(
             name="name",
             family="CAPYBARA",
+            hosted_by="ANTHROPIC",
+            developed_by="01_AI",
             exec_config=MlModelExecConfigRequest(
                 model_identifier="model_identifier",
                 base_url="base_url",
@@ -244,7 +246,7 @@ class MlModelsClient:
         Parameters
         ----------
         id : str
-
+            Either the ML Model's ID or its unique name
 
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
@@ -293,7 +295,7 @@ class MlModelsClient:
         Parameters
         ----------
         id : str
-
+            Either the ML Model's ID or its unique name
 
         display_config : typing.Optional[MlModelDisplayConfigRequest]
             Configuration for how to display the ML Model.
@@ -355,7 +357,7 @@ class MlModelsClient:
         Parameters
         ----------
         id : str
-
+            Either the ML Model's ID or its unique name
 
         display_config : typing.Optional[MlModelDisplayConfigRequest]
             Configuration for how to display the ML Model.
@@ -475,9 +477,9 @@ class AsyncMlModelsClient:
         *,
         name: str,
         family: MlModelFamily,
+        hosted_by: HostedByEnum,
+        developed_by: MlModelDeveloper,
         exec_config: MlModelExecConfigRequest,
-        hosted_by: typing.Optional[HostedByEnum] = OMIT,
-        developed_by: typing.Optional[MlModelDeveloper] = OMIT,
         parameter_config: typing.Optional[MlModelParameterConfigRequest] = OMIT,
         display_config: typing.Optional[MlModelDisplayConfigRequest] = OMIT,
         visibility: typing.Optional[VisibilityEnum] = OMIT,
@@ -515,10 +517,7 @@ class AsyncMlModelsClient:
             * `YI` - Yi
             * `ZEPHYR` - Zephyr
 
-
-            Configuration for how to execute the ML Model.
-
-        hosted_by : typing.Optional[HostedByEnum]
+        hosted_by : HostedByEnum
             The organization hosting the ML Model.
 
             * `ANTHROPIC` - ANTHROPIC
@@ -539,7 +538,7 @@ class AsyncMlModelsClient:
             * `PYQ` - PYQ
             * `REPLICATE` - REPLICATE
 
-        developed_by :
+        developed_by : MlModelDeveloper
             The organization that developed the ML Model.
 
             * `01_AI` - 01_AI
@@ -561,6 +560,9 @@ class AsyncMlModelsClient:
             * `TII` - TII
             * `WIZARDLM` - WIZARDLM
 
+        exec_config : MlModelExecConfigRequest
+            Configuration for how to execute the ML Model.
+
         parameter_config : typing.Optional[MlModelParameterConfigRequest]
             Configuration for the ML Model's parameters.
 
@@ -599,6 +601,8 @@ class AsyncMlModelsClient:
         await client.ml_models.create(
             name="name",
             family="CAPYBARA",
+            hosted_by="ANTHROPIC",
+            developed_by="01_AI",
             exec_config=MlModelExecConfigRequest(
                 model_identifier="model_identifier",
                 base_url="base_url",
@@ -642,7 +646,7 @@ class AsyncMlModelsClient:
         Parameters
         ----------
         id : str
-
+            Either the ML Model's ID or its unique name
 
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
@@ -699,7 +703,7 @@ class AsyncMlModelsClient:
         Parameters
         ----------
         id : str
-
+            Either the ML Model's ID or its unique name
 
         display_config : typing.Optional[MlModelDisplayConfigRequest]
             Configuration for how to display the ML Model.
@@ -769,7 +773,7 @@ class AsyncMlModelsClient:
         Parameters
         ----------
         id : str
-
+            Either the ML Model's ID or its unique name
 
         display_config : typing.Optional[MlModelDisplayConfigRequest]
             Configuration for how to display the ML Model.
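Taken together, these hunks are a breaking change to ml_models.create: hosted_by and developed_by move from optional keywords (defaulting to OMIT) to required keyword arguments, and exec_config gains the docstring entry it was missing. A minimal sketch of a 0.7.8-style call, reusing the placeholder values from the docstring example above; the metadata and features contents are illustrative, since the example in this diff is truncated after base_url:

# Sketch: creating an ML Model against the 0.7.8 client.
# hosted_by / developed_by were optional in 0.7.6 and are now required.
from vellum import MlModelExecConfigRequest
from vellum.client import Vellum

client = Vellum(api_key="YOUR_API_KEY")

client.ml_models.create(
    name="name",
    family="CAPYBARA",
    hosted_by="ANTHROPIC",   # required as of 0.7.8
    developed_by="01_AI",    # required as of 0.7.8
    exec_config=MlModelExecConfigRequest(
        model_identifier="model_identifier",
        base_url="base_url",
        metadata={},   # illustrative; the diff's example stops at base_url
        features=[],
    ),
)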
vellum/types/__init__.py
CHANGED
@@ -279,9 +279,11 @@ from .ml_model_tokenizer_config_request import (
 )
 from .ml_model_usage import MlModelUsage
 from .named_scenario_input_chat_history_variable_value_request import NamedScenarioInputChatHistoryVariableValueRequest
+from .named_scenario_input_json_variable_value_request import NamedScenarioInputJsonVariableValueRequest
 from .named_scenario_input_request import (
     NamedScenarioInputRequest,
     NamedScenarioInputRequest_ChatHistory,
+    NamedScenarioInputRequest_Json,
     NamedScenarioInputRequest_String,
 )
 from .named_scenario_input_string_variable_value_request import NamedScenarioInputStringVariableValueRequest
@@ -454,8 +456,9 @@ from .rejected_workflow_node_result_event import RejectedWorkflowNodeResultEvent
 from .release_tag_source import ReleaseTagSource
 from .replace_test_suite_test_case_request import ReplaceTestSuiteTestCaseRequest
 from .sandbox_scenario import SandboxScenario
-from .scenario_input import ScenarioInput, ScenarioInput_ChatHistory, ScenarioInput_String
+from .scenario_input import ScenarioInput, ScenarioInput_ChatHistory, ScenarioInput_Json, ScenarioInput_String
 from .scenario_input_chat_history_variable_value import ScenarioInputChatHistoryVariableValue
+from .scenario_input_json_variable_value import ScenarioInputJsonVariableValue
 from .scenario_input_string_variable_value import ScenarioInputStringVariableValue
 from .search_filters_request import SearchFiltersRequest
 from .search_node_result import SearchNodeResult
@@ -987,8 +990,10 @@ __all__ = [
     "MlModelTokenizerConfig_Tiktoken",
     "MlModelUsage",
     "NamedScenarioInputChatHistoryVariableValueRequest",
+    "NamedScenarioInputJsonVariableValueRequest",
     "NamedScenarioInputRequest",
     "NamedScenarioInputRequest_ChatHistory",
+    "NamedScenarioInputRequest_Json",
     "NamedScenarioInputRequest_String",
     "NamedScenarioInputStringVariableValueRequest",
     "NamedTestCaseArrayVariableValue",
@@ -1146,8 +1151,10 @@ __all__ = [
     "SandboxScenario",
     "ScenarioInput",
     "ScenarioInputChatHistoryVariableValue",
+    "ScenarioInputJsonVariableValue",
     "ScenarioInputStringVariableValue",
     "ScenarioInput_ChatHistory",
+    "ScenarioInput_Json",
     "ScenarioInput_String",
     "SearchFiltersRequest",
     "SearchNodeResult",
vellum/types/ml_model_exec_config.py
CHANGED
@@ -16,7 +16,6 @@ class MlModelExecConfig(pydantic_v1.BaseModel):
     base_url: str
     metadata: typing.Dict[str, typing.Any]
     features: typing.List[MlModelFeature]
-    force_system_credentials: typing.Optional[bool] = None
     tokenizer_config: typing.Optional[MlModelTokenizerConfig] = None
     request_config: typing.Optional[MlModelRequestConfig] = None
     response_config: typing.Optional[MlModelResponseConfig] = None
vellum/types/ml_model_exec_config_request.py
CHANGED
@@ -16,7 +16,6 @@ class MlModelExecConfigRequest(pydantic_v1.BaseModel):
     base_url: str
     metadata: typing.Dict[str, typing.Any]
     features: typing.List[MlModelFeature]
-    force_system_credentials: typing.Optional[bool] = None
    tokenizer_config: typing.Optional[MlModelTokenizerConfigRequest] = None
     request_config: typing.Optional[MlModelRequestConfigRequest] = None
     response_config: typing.Optional[MlModelResponseConfigRequest] = None
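Both exec-config models drop force_system_credentials. Since the generated models elsewhere in this diff declare extra = pydantic_v1.Extra.allow, code that still passes the field likely won't raise, but the value is no longer typed or documented. A sketch of the 0.7.8 shape, with placeholder values:

from vellum import MlModelExecConfigRequest

exec_config = MlModelExecConfigRequest(
    model_identifier="model_identifier",  # placeholders throughout
    base_url="base_url",
    metadata={},
    features=[],
    # force_system_credentials=True,  # removed in 0.7.8; no longer a typed field
)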
vellum/types/named_scenario_input_json_variable_value_request.py
ADDED
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class NamedScenarioInputJsonVariableValueRequest(pydantic_v1.BaseModel):
+    """
+    Named Prompt Sandbox Scenario input value that is of type JSON
+    """
+
+    value: typing.Any
+    name: str
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
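As in the other Fern-generated models, the overridden dict() above merges a dump that drops unset fields with one that drops None-valued fields via deep_union_pydantic_dicts, so a field survives serialization if it was explicitly set or carries a non-None value. A small usage sketch of the new type; the values are illustrative:

from vellum import NamedScenarioInputJsonVariableValueRequest

req = NamedScenarioInputJsonVariableValueRequest(
    name="context",
    value={"user_id": 123, "tags": ["a", "b"]},  # value is typing.Any: any JSON-serializable payload
)

req.json()  # roughly '{"value": {"user_id": 123, "tags": ["a", "b"]}, "name": "context"}'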
vellum/types/named_scenario_input_request.py
CHANGED
@@ -34,6 +34,30 @@ class NamedScenarioInputRequest_String(pydantic_v1.BaseModel):
         json_encoders = {dt.datetime: serialize_datetime}
 
 
+class NamedScenarioInputRequest_Json(pydantic_v1.BaseModel):
+    value: typing.Any
+    name: str
+    type: typing.Literal["JSON"] = "JSON"
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
+
+
 class NamedScenarioInputRequest_ChatHistory(pydantic_v1.BaseModel):
     value: typing.Optional[typing.List[ChatMessageRequest]] = None
     name: str
@@ -58,4 +82,6 @@ class NamedScenarioInputRequest_ChatHistory(pydantic_v1.BaseModel):
         json_encoders = {dt.datetime: serialize_datetime}
 
 
-NamedScenarioInputRequest = typing.Union[NamedScenarioInputRequest_String, NamedScenarioInputRequest_ChatHistory]
+NamedScenarioInputRequest = typing.Union[
+    NamedScenarioInputRequest_String, NamedScenarioInputRequest_Json, NamedScenarioInputRequest_ChatHistory
+]
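NamedScenarioInputRequest is a tagged union: each member carries a typing.Literal type field with a default, and smart_union = True lets pydantic resolve the right member on parse. A sketch of constructing the new JSON variant, with illustrative values:

from vellum import NamedScenarioInputRequest_Json

json_input = NamedScenarioInputRequest_Json(
    name="customer_record",
    value={"plan": "pro", "seats": 4},
)
assert json_input.type == "JSON"  # the literal default supplies the discriminator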
vellum/types/scenario_input.py
CHANGED
@@ -34,6 +34,30 @@ class ScenarioInput_String(pydantic_v1.BaseModel):
         json_encoders = {dt.datetime: serialize_datetime}
 
 
+class ScenarioInput_Json(pydantic_v1.BaseModel):
+    value: typing.Any
+    input_variable_id: str
+    type: typing.Literal["JSON"] = "JSON"
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
+
+
 class ScenarioInput_ChatHistory(pydantic_v1.BaseModel):
     value: typing.Optional[typing.List[ChatMessage]] = None
     input_variable_id: str
@@ -58,4 +82,4 @@ class ScenarioInput_ChatHistory(pydantic_v1.BaseModel):
         json_encoders = {dt.datetime: serialize_datetime}
 
 
-ScenarioInput = typing.Union[ScenarioInput_String, ScenarioInput_ChatHistory]
+ScenarioInput = typing.Union[ScenarioInput_String, ScenarioInput_Json, ScenarioInput_ChatHistory]
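On the read side, ScenarioInput values can now carry type == "JSON". A sketch of narrowing the widened union, assuming the _String and _ChatHistory members carry the usual "STRING" and "CHAT_HISTORY" literals (only _Json and _ChatHistory appear in this diff):

from vellum.types import ScenarioInput

def describe(scenario_input: ScenarioInput) -> str:
    # Branch on the `type` discriminator; ScenarioInput_Json is new in 0.7.8.
    if scenario_input.type == "JSON":
        return f"JSON value for variable {scenario_input.input_variable_id}"
    if scenario_input.type == "CHAT_HISTORY":
        return f"{len(scenario_input.value or [])} chat message(s)"
    return f"string value: {scenario_input.value!r}"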
vellum/types/scenario_input_json_variable_value.py
ADDED
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class ScenarioInputJsonVariableValue(pydantic_v1.BaseModel):
+    """
+    Prompt Sandbox Scenario input value that is of type JSON
+    """
+
+    value: typing.Any
+    input_variable_id: str
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
{vellum_ai-0.7.6.dist-info → vellum_ai-0.7.8.dist-info}/RECORD
CHANGED
@@ -1,8 +1,8 @@
-vellum/__init__.py,sha256=
+vellum/__init__.py,sha256=Y08IiuMhi07kjouldi6dHJvZRt8cwdEVSKrG0N9BzGM,49953
 vellum/client.py,sha256=FEelOptuh8ylBnqSznSXvIUj2LWGTEPDTPrK5sgQkSE,83651
 vellum/core/__init__.py,sha256=UFXpYzcGxWQUucU1TkjOQ9mGWN3A5JohluOIWVYKU4I,973
 vellum/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
-vellum/core/client_wrapper.py,sha256=
+vellum/core/client_wrapper.py,sha256=zAmOvT3hIgcE7oJ1_UMRHNE5WUsLBY-bplg8t5xrwlo,1873
 vellum/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
 vellum/core/file.py,sha256=sy1RUGZ3aJYuw998bZytxxo6QdgKmlnlgBaMvwEKCGg,1480
 vellum/core/http_client.py,sha256=46CyqS5Y8MwWTclAXnb1z5-ODJfwfHYbyhvjhb7RY1c,18753
@@ -41,7 +41,7 @@ vellum/resources/documents/client.py,sha256=lrRR9wp5nnMnENycYm-FrWwKIy7tKrfpHQ5L
 vellum/resources/folder_entities/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 vellum/resources/folder_entities/client.py,sha256=EZ_RjrB87rPLoaqNC44Dkrhp7aWEqEqI2pm5bekMqLw,4359
 vellum/resources/ml_models/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-vellum/resources/ml_models/client.py,sha256=
+vellum/resources/ml_models/client.py,sha256=ygrjdLSoUBJIHjT4AtofxcWezx-E1pO5-tgpt4XvwvI,26644
 vellum/resources/sandboxes/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 vellum/resources/sandboxes/client.py,sha256=Vn80xkXWKZ8llBQSSoSqs9NU62mP1BBpNxgRBpDdLy8,15204
 vellum/resources/test_suite_runs/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
@@ -63,7 +63,7 @@ vellum/terraform/document_index/__init__.py,sha256=GY4Sn8X8-TgNiW_2Rph2uvY6tmJ6q
 vellum/terraform/provider/__init__.py,sha256=YYQLWWJDslcjc1eN0N719A3wqMdbNR2c3WuqGtX1U_I,12684
 vellum/terraform/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 vellum/terraform/versions.json,sha256=L-eKdEx1tp0hyZY8TGQ0Gy1a5R7q9hHAMLrCXICurZo,56
-vellum/types/__init__.py,sha256=
+vellum/types/__init__.py,sha256=74SSEdsGGOEXMsJZJwqRExTDFUxadMjmPdS_YpK5710,66645
 vellum/types/add_openai_api_key_enum.py,sha256=GB7sLK_Ou7-Xn73sKJHUo6Gx3TjyhU7uJvWZAg4UeaI,92
 vellum/types/api_node_result.py,sha256=H25_pCtZ9xT6GTbQG1Gae89yjK-45yKipSLNqfc2PTk,1246
 vellum/types/api_node_result_data.py,sha256=1MIuC_rN_4ul7PQVWPbQySrLKzwwVb5sjXO6G6v13z4,1424
@@ -200,8 +200,8 @@ vellum/types/ml_model_display_config_labelled.py,sha256=SleUaRCUTc3nFAqU6R8UOOzR
 vellum/types/ml_model_display_config_request.py,sha256=WfPdaLzX0X0w4V-3hQ1A8JOuej1mqsevEzkj8pW-ry4,1242
 vellum/types/ml_model_display_tag.py,sha256=mvxWDmFB7nxMiO9f9XEueWoXL3YmkPfqvroDVzVXNyE,229
 vellum/types/ml_model_display_tag_enum_value_label.py,sha256=r8_Nxz3TOBcEhWmVVgCbrI0h2x85Q8RsGA-TLZOBBe0,1213
-vellum/types/ml_model_exec_config.py,sha256=
-vellum/types/ml_model_exec_config_request.py,sha256=
+vellum/types/ml_model_exec_config.py,sha256=bZiGQ2XQ2M67eJHzOM9wZIE3oq2Mee9B-9InNj5-Ew4,1658
+vellum/types/ml_model_exec_config_request.py,sha256=Fy5nGCj7zXCnB6N1fs0cmdRCRV_hvYYNzS2NMXH47jE,1731
 vellum/types/ml_model_family.py,sha256=96a6cigZeoGoM4VLvwvSmIKnEVBAI8RsW7xaS3EZZDI,513
 vellum/types/ml_model_family_enum_value_label.py,sha256=ldXOZN0ZV277Q9qSjli3kJJ47ozzDe50d1DanB_l9v4,1196
 vellum/types/ml_model_feature.py,sha256=s3DTCKk5m8MBdVV4-gHGe3Oj4B38fS6I1LLj8pps4Lo,441
@@ -219,7 +219,8 @@ vellum/types/ml_model_tokenizer_config.py,sha256=4E01CgGUU5FlHja7wnMio8pSpfnHStH
 vellum/types/ml_model_tokenizer_config_request.py,sha256=mOLzJIidqLnkjg3Sj1JvHhX-e7ATK_tGuZt7oZ5W-Hc,2365
 vellum/types/ml_model_usage.py,sha256=IFbXxMXf-4bkGuOzPIgfm0acZ4lVRI9C6uQpeO4_0o8,1349
 vellum/types/named_scenario_input_chat_history_variable_value_request.py,sha256=CfGCHcFOEWL3UV-VjA6pYyExnpAabrlQ44c6LxdhjyA,1361
-vellum/types/named_scenario_input_request.py,sha256=
+vellum/types/named_scenario_input_json_variable_value_request.py,sha256=r6jsE_wRPQHudeTaKDrSlKYVH2Nu4F6PlSaKx21o4GM,1248
+vellum/types/named_scenario_input_request.py,sha256=KGuT0bFtt3rmBJHmWkTlD8zKqRtmZKfP865aHifi6XM,3452
 vellum/types/named_scenario_input_string_variable_value_request.py,sha256=tlJ8G8ru1BQHxdjfBpMiwKhQOHOw3mROuyJzG3KcGow,1269
 vellum/types/named_test_case_array_variable_value.py,sha256=reOwe29S23QepGANR5yGWpKpUUQ_SUDPg5TCBBEnEDA,1323
 vellum/types/named_test_case_array_variable_value_request.py,sha256=-_F7ymtx32IfSCSgoHjHXDE1kcloeQvZdy8lhRY1aCI,1352
@@ -319,8 +320,9 @@ vellum/types/rejected_workflow_node_result_event.py,sha256=n0yp5qdEyTEsnSVRAWPFU
 vellum/types/release_tag_source.py,sha256=YavosOXZ976yfXTNWRTZwh2HhRiYmSDk0bQCkl-jCoQ,158
 vellum/types/replace_test_suite_test_case_request.py,sha256=70JYolE2hwB52LU5gpkEgF3bKC-epaeDeuJrgPzJDmE,2209
 vellum/types/sandbox_scenario.py,sha256=1vp9eQhMMPpiHfSWoAtTUpDmx0jy4VMWW9ZzHlaj7Yk,1407
-vellum/types/scenario_input.py,sha256=
+vellum/types/scenario_input.py,sha256=fMFI-FPKq6WBA1I_h_XrheeVN27JoqU_YB5vCGlD-ho,3379
 vellum/types/scenario_input_chat_history_variable_value.py,sha256=l63tBZzYwgtcBVB_-Iph-ylRRQMV7FkD7fZVbadLk2c,1334
+vellum/types/scenario_input_json_variable_value.py,sha256=SXZ7stmJ2DmXcYdmiwTkvKahP87EiCcau3QoRTn-q9I,1243
 vellum/types/scenario_input_string_variable_value.py,sha256=QEeWZyJWvEeEParxhVs4n5sJ6fztkz4uA1CVqnIrVEw,1264
 vellum/types/search_filters_request.py,sha256=3k90Kq2KWu8p9a-CYtkcybS7ODNosNLgBIk-ZL0uQkM,1481
 vellum/types/search_node_result.py,sha256=Yz9S17nCtOTQDkhWM_hPUOHG8ZttG1TQukzSVjnWGH4,1260
@@ -494,7 +496,7 @@ vellum/types/workflow_result_event_output_data_search_results.py,sha256=_C4ueKK8
 vellum/types/workflow_result_event_output_data_string.py,sha256=AAWHZT3X9HOIRA3UuIqw0VpfSGwGemsJM71WDNbWYTc,1745
 vellum/types/workflow_stream_event.py,sha256=5K-Mtn9fvJDq8m5nhURDbChL01PXIiuIZDkfAC1d6fU,2610
 vellum/version.py,sha256=neLt8HBHHUtDF9M5fsyUzHT-pKooEPvceaLDqqIGb0s,77
-vellum_ai-0.7.6.dist-info/LICENSE,sha256=
-vellum_ai-0.7.6.dist-info/METADATA,sha256=
-vellum_ai-0.7.6.dist-info/WHEEL,sha256=
-vellum_ai-0.7.6.dist-info/RECORD,,
+vellum_ai-0.7.8.dist-info/LICENSE,sha256=CcaljEIoOBaU-wItPH4PmM_mDCGpyuUY0Er1BGu5Ti8,1073
+vellum_ai-0.7.8.dist-info/METADATA,sha256=R5nnOWBhtOMhaXi9kk1yndyxQnvBrb-nkVdWAnHmrEs,4398
+vellum_ai-0.7.8.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
+vellum_ai-0.7.8.dist-info/RECORD,,
{vellum_ai-0.7.6.dist-info → vellum_ai-0.7.8.dist-info}/LICENSE
File without changes
{vellum_ai-0.7.6.dist-info → vellum_ai-0.7.8.dist-info}/WHEEL
File without changes