vellum-ai 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. vellum/__init__.py +88 -0
  2. vellum/client.py +118 -7
  3. vellum/core/client_wrapper.py +1 -1
  4. vellum/errors/forbidden_error.py +3 -2
  5. vellum/resources/test_suites/client.py +25 -27
  6. vellum/types/__init__.py +96 -0
  7. vellum/types/chat_history_input_request.py +30 -0
  8. vellum/types/error_execute_prompt_response.py +30 -0
  9. vellum/types/execute_prompt_api_error_response.py +28 -0
  10. vellum/types/execute_prompt_response.py +43 -0
  11. vellum/types/json_execute_prompt_response.py +29 -0
  12. vellum/types/json_input_request.py +29 -0
  13. vellum/types/prompt_deployment_input_request.py +43 -0
  14. vellum/types/register_prompt_model_parameters_request.py +1 -1
  15. vellum/types/search_result_document_request.py +34 -0
  16. vellum/types/search_result_request.py +34 -0
  17. vellum/types/string_execute_prompt_response.py +29 -0
  18. vellum/types/string_input_request.py +29 -0
  19. vellum/types/test_case_chat_history_variable_value.py +30 -0
  20. vellum/types/test_case_chat_history_variable_value_request.py +30 -0
  21. vellum/types/test_case_error_variable_value.py +30 -0
  22. vellum/types/test_case_error_variable_value_request.py +30 -0
  23. vellum/types/test_case_json_variable_value.py +29 -0
  24. vellum/types/test_case_json_variable_value_request.py +29 -0
  25. vellum/types/test_case_number_variable_value.py +29 -0
  26. vellum/types/test_case_number_variable_value_request.py +29 -0
  27. vellum/types/test_case_search_results_variable_value.py +30 -0
  28. vellum/types/test_case_search_results_variable_value_request.py +30 -0
  29. vellum/types/test_case_string_variable_value.py +29 -0
  30. vellum/types/test_case_string_variable_value_request.py +29 -0
  31. vellum/types/test_case_variable_value.py +78 -0
  32. vellum/types/test_case_variable_value_request.py +78 -0
  33. vellum/types/test_suite_test_case.py +5 -11
  34. vellum/types/vellum_error_request.py +30 -0
  35. {vellum_ai-0.1.3.dist-info → vellum_ai-0.1.5.dist-info}/METADATA +1 -1
  36. {vellum_ai-0.1.3.dist-info → vellum_ai-0.1.5.dist-info}/RECORD +37 -11
  37. {vellum_ai-0.1.3.dist-info → vellum_ai-0.1.5.dist-info}/WHEEL +0 -0
vellum/types/__init__.py CHANGED
@@ -3,6 +3,7 @@
  from .api_node_result import ApiNodeResult
  from .api_node_result_data import ApiNodeResultData
  from .block_type_enum import BlockTypeEnum
+ from .chat_history_input_request import ChatHistoryInputRequest
  from .chat_message import ChatMessage
  from .chat_message_request import ChatMessageRequest
  from .chat_message_role import ChatMessageRole
@@ -17,8 +18,16 @@ from .document_read import DocumentRead
  from .document_status import DocumentStatus
  from .enriched_normalized_completion import EnrichedNormalizedCompletion
  from .environment_enum import EnvironmentEnum
+ from .error_execute_prompt_response import ErrorExecutePromptResponse
  from .evaluation_params import EvaluationParams
  from .evaluation_params_request import EvaluationParamsRequest
+ from .execute_prompt_api_error_response import ExecutePromptApiErrorResponse
+ from .execute_prompt_response import (
+     ExecutePromptResponse,
+     ExecutePromptResponse_Error,
+     ExecutePromptResponse_Json,
+     ExecutePromptResponse_String,
+ )
  from .execute_workflow_stream_error_response import ExecuteWorkflowStreamErrorResponse
  from .finish_reason_enum import FinishReasonEnum
  from .generate_error_response import GenerateErrorResponse
@@ -32,6 +41,8 @@ from .generate_stream_response import GenerateStreamResponse
  from .generate_stream_result import GenerateStreamResult
  from .generate_stream_result_data import GenerateStreamResultData
  from .indexing_state_enum import IndexingStateEnum
+ from .json_execute_prompt_response import JsonExecutePromptResponse
+ from .json_input_request import JsonInputRequest
  from .logical_operator import LogicalOperator
  from .logprobs_enum import LogprobsEnum
  from .metadata_filter_config_request import MetadataFilterConfigRequest
@@ -63,6 +74,12 @@ from .normalized_token_log_probs import NormalizedTokenLogProbs
  from .paginated_slim_document_list import PaginatedSlimDocumentList
  from .processing_failure_reason_enum import ProcessingFailureReasonEnum
  from .processing_state_enum import ProcessingStateEnum
+ from .prompt_deployment_input_request import (
+     PromptDeploymentInputRequest,
+     PromptDeploymentInputRequest_ChatHistory,
+     PromptDeploymentInputRequest_Json,
+     PromptDeploymentInputRequest_String,
+ )
  from .prompt_node_result import PromptNodeResult
  from .prompt_node_result_data import PromptNodeResultData
  from .prompt_template_block import PromptTemplateBlock
@@ -96,9 +113,13 @@ from .search_request_options_request import SearchRequestOptionsRequest
  from .search_response import SearchResponse
  from .search_result import SearchResult
  from .search_result_document import SearchResultDocument
+ from .search_result_document_request import SearchResultDocumentRequest
  from .search_result_merging_request import SearchResultMergingRequest
+ from .search_result_request import SearchResultRequest
  from .search_weights_request import SearchWeightsRequest
  from .slim_document import SlimDocument
+ from .string_execute_prompt_response import StringExecutePromptResponse
+ from .string_input_request import StringInputRequest
  from .submit_completion_actual_request import SubmitCompletionActualRequest
  from .submit_completion_actuals_error_response import SubmitCompletionActualsErrorResponse
  from .submit_workflow_execution_actual_request import (
@@ -141,11 +162,42 @@ from .terminal_node_result_output import (
  )
  from .terminal_node_search_results_result import TerminalNodeSearchResultsResult
  from .terminal_node_string_result import TerminalNodeStringResult
+ from .test_case_chat_history_variable_value import TestCaseChatHistoryVariableValue
+ from .test_case_chat_history_variable_value_request import TestCaseChatHistoryVariableValueRequest
+ from .test_case_error_variable_value import TestCaseErrorVariableValue
+ from .test_case_error_variable_value_request import TestCaseErrorVariableValueRequest
+ from .test_case_json_variable_value import TestCaseJsonVariableValue
+ from .test_case_json_variable_value_request import TestCaseJsonVariableValueRequest
+ from .test_case_number_variable_value import TestCaseNumberVariableValue
+ from .test_case_number_variable_value_request import TestCaseNumberVariableValueRequest
+ from .test_case_search_results_variable_value import TestCaseSearchResultsVariableValue
+ from .test_case_search_results_variable_value_request import TestCaseSearchResultsVariableValueRequest
+ from .test_case_string_variable_value import TestCaseStringVariableValue
+ from .test_case_string_variable_value_request import TestCaseStringVariableValueRequest
+ from .test_case_variable_value import (
+     TestCaseVariableValue,
+     TestCaseVariableValue_ChatHistory,
+     TestCaseVariableValue_Error,
+     TestCaseVariableValue_Json,
+     TestCaseVariableValue_Number,
+     TestCaseVariableValue_SearchResults,
+     TestCaseVariableValue_String,
+ )
+ from .test_case_variable_value_request import (
+     TestCaseVariableValueRequest,
+     TestCaseVariableValueRequest_ChatHistory,
+     TestCaseVariableValueRequest_Error,
+     TestCaseVariableValueRequest_Json,
+     TestCaseVariableValueRequest_Number,
+     TestCaseVariableValueRequest_SearchResults,
+     TestCaseVariableValueRequest_String,
+ )
  from .test_suite_test_case import TestSuiteTestCase
  from .upload_document_error_response import UploadDocumentErrorResponse
  from .upload_document_response import UploadDocumentResponse
  from .vellum_error import VellumError
  from .vellum_error_code_enum import VellumErrorCodeEnum
+ from .vellum_error_request import VellumErrorRequest
  from .vellum_variable import VellumVariable
  from .vellum_variable_type import VellumVariableType
  from .workflow_event_error import WorkflowEventError
@@ -198,6 +250,7 @@ __all__ = [
      "ApiNodeResult",
      "ApiNodeResultData",
      "BlockTypeEnum",
+     "ChatHistoryInputRequest",
      "ChatMessage",
      "ChatMessageRequest",
      "ChatMessageRole",
@@ -212,8 +265,14 @@ __all__ = [
      "DocumentStatus",
      "EnrichedNormalizedCompletion",
      "EnvironmentEnum",
+     "ErrorExecutePromptResponse",
      "EvaluationParams",
      "EvaluationParamsRequest",
+     "ExecutePromptApiErrorResponse",
+     "ExecutePromptResponse",
+     "ExecutePromptResponse_Error",
+     "ExecutePromptResponse_Json",
+     "ExecutePromptResponse_String",
      "ExecuteWorkflowStreamErrorResponse",
      "FinishReasonEnum",
      "GenerateErrorResponse",
@@ -227,6 +286,8 @@ __all__ = [
      "GenerateStreamResult",
      "GenerateStreamResultData",
      "IndexingStateEnum",
+     "JsonExecutePromptResponse",
+     "JsonInputRequest",
      "LogicalOperator",
      "LogprobsEnum",
      "MetadataFilterConfigRequest",
@@ -256,6 +317,10 @@ __all__ = [
      "PaginatedSlimDocumentList",
      "ProcessingFailureReasonEnum",
      "ProcessingStateEnum",
+     "PromptDeploymentInputRequest",
+     "PromptDeploymentInputRequest_ChatHistory",
+     "PromptDeploymentInputRequest_Json",
+     "PromptDeploymentInputRequest_String",
      "PromptNodeResult",
      "PromptNodeResultData",
      "PromptTemplateBlock",
@@ -289,9 +354,13 @@ __all__ = [
      "SearchResponse",
      "SearchResult",
      "SearchResultDocument",
+     "SearchResultDocumentRequest",
      "SearchResultMergingRequest",
+     "SearchResultRequest",
      "SearchWeightsRequest",
      "SlimDocument",
+     "StringExecutePromptResponse",
+     "StringInputRequest",
      "SubmitCompletionActualRequest",
      "SubmitCompletionActualsErrorResponse",
      "SubmitWorkflowExecutionActualRequest",
@@ -328,11 +397,38 @@ __all__ = [
      "TerminalNodeResultOutput_String",
      "TerminalNodeSearchResultsResult",
      "TerminalNodeStringResult",
+     "TestCaseChatHistoryVariableValue",
+     "TestCaseChatHistoryVariableValueRequest",
+     "TestCaseErrorVariableValue",
+     "TestCaseErrorVariableValueRequest",
+     "TestCaseJsonVariableValue",
+     "TestCaseJsonVariableValueRequest",
+     "TestCaseNumberVariableValue",
+     "TestCaseNumberVariableValueRequest",
+     "TestCaseSearchResultsVariableValue",
+     "TestCaseSearchResultsVariableValueRequest",
+     "TestCaseStringVariableValue",
+     "TestCaseStringVariableValueRequest",
+     "TestCaseVariableValue",
+     "TestCaseVariableValueRequest",
+     "TestCaseVariableValueRequest_ChatHistory",
+     "TestCaseVariableValueRequest_Error",
+     "TestCaseVariableValueRequest_Json",
+     "TestCaseVariableValueRequest_Number",
+     "TestCaseVariableValueRequest_SearchResults",
+     "TestCaseVariableValueRequest_String",
+     "TestCaseVariableValue_ChatHistory",
+     "TestCaseVariableValue_Error",
+     "TestCaseVariableValue_Json",
+     "TestCaseVariableValue_Number",
+     "TestCaseVariableValue_SearchResults",
+     "TestCaseVariableValue_String",
      "TestSuiteTestCase",
      "UploadDocumentErrorResponse",
      "UploadDocumentResponse",
      "VellumError",
      "VellumErrorCodeEnum",
+     "VellumErrorRequest",
      "VellumVariable",
      "VellumVariableType",
      "WorkflowEventError"
vellum/types/chat_history_input_request.py ADDED
@@ -0,0 +1,30 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .chat_message_request import ChatMessageRequest
+
+ try:
+     import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+     import pydantic # type: ignore
+
+
+ class ChatHistoryInputRequest(pydantic.BaseModel):
+     name: str = pydantic.Field(description="The variable's name, as defined in the deployment.")
+     value: typing.List[ChatMessageRequest]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
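For orientation only (not part of the diff): a minimal sketch of constructing and serializing the new ChatHistoryInputRequest. The generated `json()`/`dict()` overrides default to `by_alias=True` and `exclude_unset=True`, so unset fields are omitted. The `role`/`text` fields on ChatMessageRequest are assumed from the existing SDK types and are not shown in this diff.

from vellum.types import ChatHistoryInputRequest, ChatMessageRequest

# Illustrative only; ChatMessageRequest's `role`/`text` fields are assumed, not shown above.
chat_history_input = ChatHistoryInputRequest(
    name="chat_history",
    value=[ChatMessageRequest(role="USER", text="Hello!")],
)

# The by_alias/exclude_unset defaults mean only explicitly set fields are serialized.
print(chat_history_input.json())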
vellum/types/error_execute_prompt_response.py ADDED
@@ -0,0 +1,30 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .vellum_error import VellumError
+
+ try:
+     import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+     import pydantic # type: ignore
+
+
+ class ErrorExecutePromptResponse(pydantic.BaseModel):
+     value: VellumError
+     execution_id: str
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
vellum/types/execute_prompt_api_error_response.py ADDED
@@ -0,0 +1,28 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+     import pydantic # type: ignore
+
+
+ class ExecutePromptApiErrorResponse(pydantic.BaseModel):
+     detail: str = pydantic.Field(description="Details about why the request failed.")
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
vellum/types/execute_prompt_response.py ADDED
@@ -0,0 +1,43 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ from __future__ import annotations
+
+ import typing
+
+ import typing_extensions
+
+ from .error_execute_prompt_response import ErrorExecutePromptResponse
+ from .json_execute_prompt_response import JsonExecutePromptResponse
+ from .string_execute_prompt_response import StringExecutePromptResponse
+
+
+ class ExecutePromptResponse_Error(ErrorExecutePromptResponse):
+     type: typing_extensions.Literal["ERROR"]
+
+     class Config:
+         frozen = True
+         smart_union = True
+         allow_population_by_field_name = True
+
+
+ class ExecutePromptResponse_Json(JsonExecutePromptResponse):
+     type: typing_extensions.Literal["JSON"]
+
+     class Config:
+         frozen = True
+         smart_union = True
+         allow_population_by_field_name = True
+
+
+ class ExecutePromptResponse_String(StringExecutePromptResponse):
+     type: typing_extensions.Literal["STRING"]
+
+     class Config:
+         frozen = True
+         smart_union = True
+         allow_population_by_field_name = True
+
+
+ ExecutePromptResponse = typing.Union[
+     ExecutePromptResponse_Error, ExecutePromptResponse_Json, ExecutePromptResponse_String
+ ]
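A minimal sketch of narrowing the new ExecutePromptResponse union, assuming a value obtained from the prompt-execution support added in client.py (that call is not shown here). The variants are discriminated by their `type` literal and resolved via pydantic's smart_union; only fields shown in this diff are used.

import typing

from vellum.types import (
    ExecutePromptResponse,
    ExecutePromptResponse_Error,
    ExecutePromptResponse_Json,
    ExecutePromptResponse_String,
)


def unwrap_prompt_output(response: ExecutePromptResponse) -> typing.Any:
    # Each variant pairs an `execution_id` with a differently typed `value`.
    if isinstance(response, ExecutePromptResponse_String):
        return response.value  # str
    if isinstance(response, ExecutePromptResponse_Json):
        return response.value  # typing.Dict[str, typing.Any]
    if isinstance(response, ExecutePromptResponse_Error):
        raise RuntimeError(f"Prompt execution {response.execution_id} failed: {response.value}")
    raise TypeError(f"Unexpected response variant: {type(response)}")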
vellum/types/json_execute_prompt_response.py ADDED
@@ -0,0 +1,29 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+     import pydantic # type: ignore
+
+
+ class JsonExecutePromptResponse(pydantic.BaseModel):
+     value: typing.Dict[str, typing.Any]
+     execution_id: str
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
vellum/types/json_input_request.py ADDED
@@ -0,0 +1,29 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+     import pydantic # type: ignore
+
+
+ class JsonInputRequest(pydantic.BaseModel):
+     name: str = pydantic.Field(description="The variable's name, as defined in the deployment.")
+     value: typing.Dict[str, typing.Any]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
vellum/types/prompt_deployment_input_request.py ADDED
@@ -0,0 +1,43 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ from __future__ import annotations
+
+ import typing
+
+ import typing_extensions
+
+ from .chat_history_input_request import ChatHistoryInputRequest
+ from .json_input_request import JsonInputRequest
+ from .string_input_request import StringInputRequest
+
+
+ class PromptDeploymentInputRequest_String(StringInputRequest):
+     type: typing_extensions.Literal["STRING"]
+
+     class Config:
+         frozen = True
+         smart_union = True
+         allow_population_by_field_name = True
+
+
+ class PromptDeploymentInputRequest_Json(JsonInputRequest):
+     type: typing_extensions.Literal["JSON"]
+
+     class Config:
+         frozen = True
+         smart_union = True
+         allow_population_by_field_name = True
+
+
+ class PromptDeploymentInputRequest_ChatHistory(ChatHistoryInputRequest):
+     type: typing_extensions.Literal["CHAT_HISTORY"]
+
+     class Config:
+         frozen = True
+         smart_union = True
+         allow_population_by_field_name = True
+
+
+ PromptDeploymentInputRequest = typing.Union[
+     PromptDeploymentInputRequest_String, PromptDeploymentInputRequest_Json, PromptDeploymentInputRequest_ChatHistory
+ ]
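A minimal sketch of building a list of the new PromptDeploymentInputRequest variants. The variable names ("query", "metadata") and values are placeholders; each variant's `type` literal must match its class, and `name` refers to a variable defined on the prompt deployment.

import typing

from vellum.types import (
    PromptDeploymentInputRequest,
    PromptDeploymentInputRequest_Json,
    PromptDeploymentInputRequest_String,
)

# Placeholder inputs for a deployment that defines a STRING and a JSON variable.
inputs: typing.List[PromptDeploymentInputRequest] = [
    PromptDeploymentInputRequest_String(type="STRING", name="query", value="What is Vellum?"),
    PromptDeploymentInputRequest_Json(type="JSON", name="metadata", value={"user_id": "abc-123"}),
]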
vellum/types/register_prompt_model_parameters_request.py CHANGED
@@ -16,7 +16,7 @@ class RegisterPromptModelParametersRequest(pydantic.BaseModel):
      max_tokens: int
      stop: typing.Optional[typing.List[str]]
      top_p: float
-     top_k: typing.Optional[float]
+     top_k: typing.Optional[int]
      frequency_penalty: float
      presence_penalty: float
      logit_bias: typing.Optional[typing.Dict[str, typing.Optional[float]]]
vellum/types/search_result_document_request.py ADDED
@@ -0,0 +1,34 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+     import pydantic # type: ignore
+
+
+ class SearchResultDocumentRequest(pydantic.BaseModel):
+     label: str = pydantic.Field(description="The human-readable name for the document.")
+     external_id: typing.Optional[str] = pydantic.Field(
+         description="The unique ID of the document as represented in an external system and specified when it was originally uploaded."
+     )
+     metadata: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
+         description="A previously supplied JSON object containing metadata that can be filtered on when searching."
+     )
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
vellum/types/search_result_request.py ADDED
@@ -0,0 +1,34 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .search_result_document_request import SearchResultDocumentRequest
+
+ try:
+     import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+     import pydantic # type: ignore
+
+
+ class SearchResultRequest(pydantic.BaseModel):
+     text: str = pydantic.Field(description="The text of the chunk that matched the search query.")
+     score: float = pydantic.Field(description="A score representing how well the chunk matches the search query.")
+     keywords: typing.List[str]
+     document: SearchResultDocumentRequest = pydantic.Field(
+         description="The document that contains the chunk that matched the search query."
+     )
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
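A minimal sketch of the new request-side search result shape, using only the fields shown above; the concrete values are placeholders.

from vellum.types import SearchResultDocumentRequest, SearchResultRequest

# Construct a search result chunk together with its source document metadata.
result = SearchResultRequest(
    text="Vellum is a platform for building production LLM applications.",
    score=0.87,
    keywords=["vellum", "llm"],
    document=SearchResultDocumentRequest(
        label="Product overview",
        external_id="doc-123",
        metadata={"source": "docs"},
    ),
)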
vellum/types/string_execute_prompt_response.py ADDED
@@ -0,0 +1,29 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+     import pydantic # type: ignore
+
+
+ class StringExecutePromptResponse(pydantic.BaseModel):
+     value: str
+     execution_id: str
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
vellum/types/string_input_request.py ADDED
@@ -0,0 +1,29 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+     import pydantic # type: ignore
+
+
+ class StringInputRequest(pydantic.BaseModel):
+     name: str = pydantic.Field(description="The variable's name, as defined in the deployment.")
+     value: str
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
vellum/types/test_case_chat_history_variable_value.py ADDED
@@ -0,0 +1,30 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .chat_message import ChatMessage
+
+ try:
+     import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+     import pydantic # type: ignore
+
+
+ class TestCaseChatHistoryVariableValue(pydantic.BaseModel):
+     variable_id: str
+     value: typing.Optional[typing.List[ChatMessage]]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
vellum/types/test_case_chat_history_variable_value_request.py ADDED
@@ -0,0 +1,30 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .chat_message_request import ChatMessageRequest
+
+ try:
+     import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+     import pydantic # type: ignore
+
+
+ class TestCaseChatHistoryVariableValueRequest(pydantic.BaseModel):
+     variable_id: str
+     value: typing.Optional[typing.List[ChatMessageRequest]]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
vellum/types/test_case_error_variable_value.py ADDED
@@ -0,0 +1,30 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .vellum_error import VellumError
+
+ try:
+     import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+     import pydantic # type: ignore
+
+
+ class TestCaseErrorVariableValue(pydantic.BaseModel):
+     variable_id: str
+     value: typing.Optional[VellumError]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}