vellum-ai 0.1.8__py3-none-any.whl → 0.1.10__py3-none-any.whl
This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- vellum/__init__.py +70 -0
- vellum/client.py +331 -5
- vellum/core/client_wrapper.py +1 -1
- vellum/errors/forbidden_error.py +3 -2
- vellum/resources/registered_prompts/client.py +2 -0
- vellum/types/__init__.py +71 -0
- vellum/types/block_type_enum.py +4 -4
- vellum/types/chat_message_role.py +4 -4
- vellum/types/deployment_read.py +6 -6
- vellum/types/deployment_status.py +3 -3
- vellum/types/document_document_to_document_index.py +5 -5
- vellum/types/document_index_read.py +4 -4
- vellum/types/document_index_status.py +2 -2
- vellum/types/document_read.py +5 -5
- vellum/types/enriched_normalized_completion.py +3 -3
- vellum/types/environment_enum.py +3 -3
- vellum/types/error_variable_value.py +29 -0
- vellum/types/execute_prompt_api_error_response.py +28 -0
- vellum/types/execute_prompt_event.py +56 -0
- vellum/types/execute_prompt_response.py +31 -0
- vellum/types/finish_reason_enum.py +3 -3
- vellum/types/fulfilled_enum.py +5 -0
- vellum/types/fulfilled_execute_prompt_event.py +36 -0
- vellum/types/fulfilled_execute_prompt_response.py +39 -0
- vellum/types/fulfilled_prompt_execution_meta.py +34 -0
- vellum/types/generate_options_request.py +1 -1
- vellum/types/indexing_state_enum.py +5 -5
- vellum/types/initiated_enum.py +5 -0
- vellum/types/initiated_execute_prompt_event.py +34 -0
- vellum/types/initiated_prompt_execution_meta.py +35 -0
- vellum/types/json_variable_value.py +28 -0
- vellum/types/logical_operator.py +18 -18
- vellum/types/logprobs_enum.py +2 -2
- vellum/types/metadata_filter_rule_combinator.py +2 -2
- vellum/types/model_version_read.py +13 -12
- vellum/types/model_version_read_status_enum.py +4 -4
- vellum/types/processing_failure_reason_enum.py +2 -2
- vellum/types/processing_state_enum.py +4 -4
- vellum/types/prompt_deployment_expand_meta_request_request.py +42 -0
- vellum/types/prompt_execution_meta.py +37 -0
- vellum/types/prompt_output.py +41 -0
- vellum/types/provider_enum.py +17 -12
- vellum/types/raw_prompt_execution_overrides_request.py +32 -0
- vellum/types/rejected_enum.py +5 -0
- vellum/types/rejected_execute_prompt_event.py +36 -0
- vellum/types/rejected_execute_prompt_response.py +39 -0
- vellum/types/rejected_prompt_execution_meta.py +34 -0
- vellum/types/scenario_input_type_enum.py +2 -2
- vellum/types/slim_document.py +7 -7
- vellum/types/streaming_enum.py +5 -0
- vellum/types/streaming_execute_prompt_event.py +40 -0
- vellum/types/streaming_prompt_execution_meta.py +32 -0
- vellum/types/string_variable_value.py +28 -0
- vellum/types/vellum_error_code_enum.py +3 -3
- vellum/types/vellum_variable_type.py +11 -6
- vellum/types/workflow_execution_event_error_code.py +6 -6
- vellum/types/workflow_execution_event_type.py +2 -2
- vellum/types/workflow_node_result_event_state.py +4 -4
- vellum/types/workflow_request_input_request.py +14 -1
- vellum/types/workflow_request_number_input_request.py +29 -0
- {vellum_ai-0.1.8.dist-info → vellum_ai-0.1.10.dist-info}/METADATA +1 -1
- {vellum_ai-0.1.8.dist-info → vellum_ai-0.1.10.dist-info}/RECORD +63 -38
- {vellum_ai-0.1.8.dist-info → vellum_ai-0.1.10.dist-info}/WHEEL +0 -0
vellum/types/__init__.py
CHANGED
@@ -36,10 +36,28 @@ from .document_read import DocumentRead
 from .document_status import DocumentStatus
 from .enriched_normalized_completion import EnrichedNormalizedCompletion
 from .environment_enum import EnvironmentEnum
+from .error_variable_value import ErrorVariableValue
 from .evaluation_params import EvaluationParams
 from .evaluation_params_request import EvaluationParamsRequest
+from .execute_prompt_api_error_response import ExecutePromptApiErrorResponse
+from .execute_prompt_event import (
+    ExecutePromptEvent,
+    ExecutePromptEvent_Fulfilled,
+    ExecutePromptEvent_Initiated,
+    ExecutePromptEvent_Rejected,
+    ExecutePromptEvent_Streaming,
+)
+from .execute_prompt_response import (
+    ExecutePromptResponse,
+    ExecutePromptResponse_Fulfilled,
+    ExecutePromptResponse_Rejected,
+)
 from .execute_workflow_stream_error_response import ExecuteWorkflowStreamErrorResponse
 from .finish_reason_enum import FinishReasonEnum
+from .fulfilled_enum import FulfilledEnum
+from .fulfilled_execute_prompt_event import FulfilledExecutePromptEvent
+from .fulfilled_execute_prompt_response import FulfilledExecutePromptResponse
+from .fulfilled_prompt_execution_meta import FulfilledPromptExecutionMeta
 from .generate_error_response import GenerateErrorResponse
 from .generate_options_request import GenerateOptionsRequest
 from .generate_request import GenerateRequest
@@ -51,7 +69,11 @@ from .generate_stream_response import GenerateStreamResponse
 from .generate_stream_result import GenerateStreamResult
 from .generate_stream_result_data import GenerateStreamResultData
 from .indexing_state_enum import IndexingStateEnum
+from .initiated_enum import InitiatedEnum
+from .initiated_execute_prompt_event import InitiatedExecutePromptEvent
+from .initiated_prompt_execution_meta import InitiatedPromptExecutionMeta
 from .json_input_request import JsonInputRequest
+from .json_variable_value import JsonVariableValue
 from .logical_operator import LogicalOperator
 from .logprobs_enum import LogprobsEnum
 from .metadata_filter_config_request import MetadataFilterConfigRequest
@@ -98,14 +120,17 @@ from .normalized_token_log_probs import NormalizedTokenLogProbs
 from .paginated_slim_document_list import PaginatedSlimDocumentList
 from .processing_failure_reason_enum import ProcessingFailureReasonEnum
 from .processing_state_enum import ProcessingStateEnum
+from .prompt_deployment_expand_meta_request_request import PromptDeploymentExpandMetaRequestRequest
 from .prompt_deployment_input_request import (
     PromptDeploymentInputRequest,
     PromptDeploymentInputRequest_ChatHistory,
     PromptDeploymentInputRequest_Json,
     PromptDeploymentInputRequest_String,
 )
+from .prompt_execution_meta import PromptExecutionMeta
 from .prompt_node_result import PromptNodeResult
 from .prompt_node_result_data import PromptNodeResultData
+from .prompt_output import PromptOutput, PromptOutput_Error, PromptOutput_Json, PromptOutput_String
 from .prompt_template_block import PromptTemplateBlock
 from .prompt_template_block_data import PromptTemplateBlockData
 from .prompt_template_block_data_request import PromptTemplateBlockDataRequest
@@ -113,6 +138,7 @@ from .prompt_template_block_properties import PromptTemplateBlockProperties
 from .prompt_template_block_properties_request import PromptTemplateBlockPropertiesRequest
 from .prompt_template_block_request import PromptTemplateBlockRequest
 from .provider_enum import ProviderEnum
+from .raw_prompt_execution_overrides_request import RawPromptExecutionOverridesRequest
 from .register_prompt_error_response import RegisterPromptErrorResponse
 from .register_prompt_model_parameters_request import RegisterPromptModelParametersRequest
 from .register_prompt_prompt import RegisterPromptPrompt
@@ -123,6 +149,10 @@ from .registered_prompt_input_variable_request import RegisteredPromptInputVaria
 from .registered_prompt_model_version import RegisteredPromptModelVersion
 from .registered_prompt_sandbox import RegisteredPromptSandbox
 from .registered_prompt_sandbox_snapshot import RegisteredPromptSandboxSnapshot
+from .rejected_enum import RejectedEnum
+from .rejected_execute_prompt_event import RejectedExecutePromptEvent
+from .rejected_execute_prompt_response import RejectedExecutePromptResponse
+from .rejected_prompt_execution_meta import RejectedPromptExecutionMeta
 from .sandbox_metric_input_params import SandboxMetricInputParams
 from .sandbox_metric_input_params_request import SandboxMetricInputParamsRequest
 from .sandbox_scenario import SandboxScenario
@@ -142,7 +172,11 @@ from .search_result_merging_request import SearchResultMergingRequest
 from .search_result_request import SearchResultRequest
 from .search_weights_request import SearchWeightsRequest
 from .slim_document import SlimDocument
+from .streaming_enum import StreamingEnum
+from .streaming_execute_prompt_event import StreamingExecutePromptEvent
+from .streaming_prompt_execution_meta import StreamingPromptExecutionMeta
 from .string_input_request import StringInputRequest
+from .string_variable_value import StringVariableValue
 from .submit_completion_actual_request import SubmitCompletionActualRequest
 from .submit_completion_actuals_error_response import SubmitCompletionActualsErrorResponse
 from .submit_workflow_execution_actual_request import (
@@ -233,9 +267,11 @@ from .workflow_request_input_request import (
     WorkflowRequestInputRequest,
     WorkflowRequestInputRequest_ChatHistory,
     WorkflowRequestInputRequest_Json,
+    WorkflowRequestInputRequest_Number,
     WorkflowRequestInputRequest_String,
 )
 from .workflow_request_json_input_request import WorkflowRequestJsonInputRequest
+from .workflow_request_number_input_request import WorkflowRequestNumberInputRequest
 from .workflow_request_string_input_request import WorkflowRequestStringInputRequest
 from .workflow_result_event import WorkflowResultEvent
 from .workflow_result_event_output_data import (
@@ -290,10 +326,24 @@ __all__ = [
     "DocumentStatus",
     "EnrichedNormalizedCompletion",
     "EnvironmentEnum",
+    "ErrorVariableValue",
     "EvaluationParams",
     "EvaluationParamsRequest",
+    "ExecutePromptApiErrorResponse",
+    "ExecutePromptEvent",
+    "ExecutePromptEvent_Fulfilled",
+    "ExecutePromptEvent_Initiated",
+    "ExecutePromptEvent_Rejected",
+    "ExecutePromptEvent_Streaming",
+    "ExecutePromptResponse",
+    "ExecutePromptResponse_Fulfilled",
+    "ExecutePromptResponse_Rejected",
     "ExecuteWorkflowStreamErrorResponse",
     "FinishReasonEnum",
+    "FulfilledEnum",
+    "FulfilledExecutePromptEvent",
+    "FulfilledExecutePromptResponse",
+    "FulfilledPromptExecutionMeta",
     "GenerateErrorResponse",
     "GenerateOptionsRequest",
     "GenerateRequest",
@@ -305,7 +355,11 @@ __all__ = [
     "GenerateStreamResult",
     "GenerateStreamResultData",
     "IndexingStateEnum",
+    "InitiatedEnum",
+    "InitiatedExecutePromptEvent",
+    "InitiatedPromptExecutionMeta",
     "JsonInputRequest",
+    "JsonVariableValue",
     "LogicalOperator",
     "LogprobsEnum",
     "MetadataFilterConfigRequest",
@@ -348,12 +402,18 @@ __all__ = [
     "PaginatedSlimDocumentList",
     "ProcessingFailureReasonEnum",
     "ProcessingStateEnum",
+    "PromptDeploymentExpandMetaRequestRequest",
     "PromptDeploymentInputRequest",
     "PromptDeploymentInputRequest_ChatHistory",
     "PromptDeploymentInputRequest_Json",
     "PromptDeploymentInputRequest_String",
+    "PromptExecutionMeta",
     "PromptNodeResult",
     "PromptNodeResultData",
+    "PromptOutput",
+    "PromptOutput_Error",
+    "PromptOutput_Json",
+    "PromptOutput_String",
     "PromptTemplateBlock",
     "PromptTemplateBlockData",
     "PromptTemplateBlockDataRequest",
@@ -361,6 +421,7 @@ __all__ = [
     "PromptTemplateBlockPropertiesRequest",
     "PromptTemplateBlockRequest",
     "ProviderEnum",
+    "RawPromptExecutionOverridesRequest",
    "RegisterPromptErrorResponse",
     "RegisterPromptModelParametersRequest",
     "RegisterPromptPrompt",
@@ -371,6 +432,10 @@ __all__ = [
     "RegisteredPromptModelVersion",
     "RegisteredPromptSandbox",
     "RegisteredPromptSandboxSnapshot",
+    "RejectedEnum",
+    "RejectedExecutePromptEvent",
+    "RejectedExecutePromptResponse",
+    "RejectedPromptExecutionMeta",
     "SandboxMetricInputParams",
     "SandboxMetricInputParamsRequest",
     "SandboxScenario",
@@ -390,7 +455,11 @@ __all__ = [
     "SearchResultRequest",
     "SearchWeightsRequest",
     "SlimDocument",
+    "StreamingEnum",
+    "StreamingExecutePromptEvent",
+    "StreamingPromptExecutionMeta",
     "StringInputRequest",
+    "StringVariableValue",
     "SubmitCompletionActualRequest",
     "SubmitCompletionActualsErrorResponse",
     "SubmitWorkflowExecutionActualRequest",
@@ -470,8 +539,10 @@ __all__ = [
     "WorkflowRequestInputRequest",
     "WorkflowRequestInputRequest_ChatHistory",
     "WorkflowRequestInputRequest_Json",
+    "WorkflowRequestInputRequest_Number",
     "WorkflowRequestInputRequest_String",
     "WorkflowRequestJsonInputRequest",
+    "WorkflowRequestNumberInputRequest",
     "WorkflowRequestStringInputRequest",
     "WorkflowResultEvent",
     "WorkflowResultEventOutputData",
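To make the expanded public surface concrete, here is a minimal import sketch against the 0.1.10 wheel; it uses only names that the updated __init__.py re-exports above.

# Minimal sketch: these names are re-exported by vellum/types/__init__.py as of 0.1.10.
from vellum.types import (
    ExecutePromptEvent,
    ExecutePromptResponse,
    FulfilledExecutePromptResponse,
    PromptOutput,
    WorkflowRequestNumberInputRequest,
)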
vellum/types/block_type_enum.py
CHANGED
@@ -8,10 +8,10 @@ T_Result = typing.TypeVar("T_Result")
 
 class BlockTypeEnum(str, enum.Enum):
     """
-
-
-
-
+    - `CHAT_MESSAGE` - CHAT_MESSAGE
+    - `CHAT_HISTORY` - CHAT_HISTORY
+    - `JINJA` - JINJA
+    - `FUNCTION_DEFINITION` - FUNCTION_DEFINITION
     """
 
     CHAT_MESSAGE = "CHAT_MESSAGE"
vellum/types/chat_message_role.py
CHANGED
@@ -8,10 +8,10 @@ T_Result = typing.TypeVar("T_Result")
 
 class ChatMessageRole(str, enum.Enum):
     """
-
-
-
-
+    - `SYSTEM` - System
+    - `ASSISTANT` - Assistant
+    - `USER` - User
+    - `FUNCTION` - Function
     """
 
     SYSTEM = "SYSTEM"
vellum/types/deployment_read.py
CHANGED
@@ -23,18 +23,18 @@ class DeploymentRead(pydantic.BaseModel):
         description=(
             "The current status of the deployment\n"
             "\n"
-            "
-            "
-            "
+            "- `ACTIVE` - Active\n"
+            "- `INACTIVE` - Inactive\n"
+            "- `ARCHIVED` - Archived\n"
         )
     )
     environment: typing.Optional[EnvironmentEnum] = pydantic.Field(
         description=(
             "The environment this deployment is used in\n"
             "\n"
-            "
-            "
-            "
+            "- `DEVELOPMENT` - Development\n"
+            "- `STAGING` - Staging\n"
+            "- `PRODUCTION` - Production\n"
         )
     )
     active_model_version_ids: typing.List[str] = pydantic.Field(
vellum/types/deployment_status.py
CHANGED
@@ -8,9 +8,9 @@ T_Result = typing.TypeVar("T_Result")
 
 class DeploymentStatus(str, enum.Enum):
     """
-
-
-
+    - `ACTIVE` - Active
+    - `INACTIVE` - Inactive
+    - `ARCHIVED` - Archived
     """
 
     ACTIVE = "ACTIVE"
vellum/types/document_document_to_document_index.py
CHANGED
@@ -21,11 +21,11 @@ class DocumentDocumentToDocumentIndex(pydantic.BaseModel):
         description=(
             "An enum value representing where this document is along its indexing lifecycle for this index.\n"
             "\n"
-            "
-            "
-            "
-            "
-            "
+            "- `AWAITING_PROCESSING` - Awaiting Processing\n"
+            "- `QUEUED` - Queued\n"
+            "- `INDEXING` - Indexing\n"
+            "- `INDEXED` - Indexed\n"
+            "- `FAILED` - Failed\n"
         )
     )
 
vellum/types/document_index_read.py
CHANGED
@@ -20,16 +20,16 @@ class DocumentIndexRead(pydantic.BaseModel):
     name: str = pydantic.Field(description="A name that uniquely identifies this index within its workspace")
     status: typing.Optional[DocumentIndexStatus] = pydantic.Field(
         description=(
-            "The current status of the document index\n" "\n" "
+            "The current status of the document index\n" "\n" "- `ACTIVE` - Active\n" "- `ARCHIVED` - Archived\n"
         )
     )
     environment: typing.Optional[EnvironmentEnum] = pydantic.Field(
         description=(
             "The environment this document index is used in\n"
             "\n"
-            "
-            "
-            "
+            "- `DEVELOPMENT` - Development\n"
+            "- `STAGING` - Staging\n"
+            "- `PRODUCTION` - Production\n"
         )
     )
     indexing_config: typing.Dict[str, typing.Any] = pydantic.Field(
vellum/types/document_read.py
CHANGED
@@ -27,14 +27,14 @@ class DocumentRead(pydantic.BaseModel):
         description=(
             "The current processing state of the document\n"
             "\n"
-            "
-            "
-            "
-            "
+            "- `QUEUED` - Queued\n"
+            "- `PROCESSING` - Processing\n"
+            "- `PROCESSED` - Processed\n"
+            "- `FAILED` - Failed\n"
         )
     )
     status: typing.Optional[DocumentStatus] = pydantic.Field(
-        description=("The current status of the document\n" "\n" "
+        description=("The current status of the document\n" "\n" "- `ACTIVE` - Active\n")
     )
     original_file_url: typing.Optional[str]
     processed_file_url: typing.Optional[str]
vellum/types/enriched_normalized_completion.py
CHANGED
@@ -24,9 +24,9 @@ class EnrichedNormalizedCompletion(pydantic.BaseModel):
         description=(
             "The reason the generation finished.\n"
             "\n"
-            "
-            "
-            "
+            "- `LENGTH` - LENGTH\n"
+            "- `STOP` - STOP\n"
+            "- `UNKNOWN` - UNKNOWN\n"
        )
    )
     logprobs: typing.Optional[NormalizedLogProbs] = pydantic.Field(
vellum/types/environment_enum.py
CHANGED
@@ -8,9 +8,9 @@ T_Result = typing.TypeVar("T_Result")
 
 class EnvironmentEnum(str, enum.Enum):
     """
-
-
-
+    - `DEVELOPMENT` - Development
+    - `STAGING` - Staging
+    - `PRODUCTION` - Production
     """
 
     DEVELOPMENT = "DEVELOPMENT"
vellum/types/error_variable_value.py
ADDED
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .vellum_error import VellumError
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ErrorVariableValue(pydantic.BaseModel):
+    value: typing.Optional[VellumError]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
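A hedged usage sketch for the new ErrorVariableValue model. The VellumError fields used below (code, message) are an assumption; this diff only shows that the class is imported from .vellum_error.

from vellum.types import ErrorVariableValue, VellumError  # VellumError re-export assumed; it predates this diff

# `code` and `message` are assumed VellumError fields, not shown in this diff.
error_value = ErrorVariableValue(value=VellumError(code="INTERNAL_SERVER_ERROR", message="Provider call failed"))
print(error_value.json())  # by_alias/exclude_unset defaults come from the json() override above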
vellum/types/execute_prompt_api_error_response.py
ADDED
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ExecutePromptApiErrorResponse(pydantic.BaseModel):
+    detail: str = pydantic.Field(description="Details about why the request failed.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/execute_prompt_event.py
ADDED
@@ -0,0 +1,56 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import typing_extensions
+
+from .fulfilled_execute_prompt_event import FulfilledExecutePromptEvent
+from .initiated_execute_prompt_event import InitiatedExecutePromptEvent
+from .rejected_execute_prompt_event import RejectedExecutePromptEvent
+from .streaming_execute_prompt_event import StreamingExecutePromptEvent
+
+
+class ExecutePromptEvent_Initiated(InitiatedExecutePromptEvent):
+    state: typing_extensions.Literal["INITIATED"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class ExecutePromptEvent_Streaming(StreamingExecutePromptEvent):
+    state: typing_extensions.Literal["STREAMING"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class ExecutePromptEvent_Fulfilled(FulfilledExecutePromptEvent):
+    state: typing_extensions.Literal["FULFILLED"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class ExecutePromptEvent_Rejected(RejectedExecutePromptEvent):
+    state: typing_extensions.Literal["REJECTED"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+ExecutePromptEvent = typing.Union[
+    ExecutePromptEvent_Initiated,
+    ExecutePromptEvent_Streaming,
+    ExecutePromptEvent_Fulfilled,
+    ExecutePromptEvent_Rejected,
+]
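Since each union member pins state to a distinct Literal, a consumer can narrow on that field. A minimal sketch, assuming an iterable of events is already available (for example from a streaming call); the events argument and handler are illustrative, not part of this package.

import typing

from vellum.types import ExecutePromptEvent


def handle_events(events: typing.Iterable[ExecutePromptEvent]) -> None:
    for event in events:
        # `state` is one of "INITIATED", "STREAMING", "FULFILLED", "REJECTED".
        if event.state == "FULFILLED":
            # FulfilledExecutePromptEvent (added in this release) carries `outputs` and `execution_id`.
            print(event.execution_id, event.outputs)
        elif event.state == "REJECTED":
            # RejectedExecutePromptEvent's fields are not shown in this diff.
            print("prompt execution rejected")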
vellum/types/execute_prompt_response.py
ADDED
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import typing_extensions
+
+from .fulfilled_execute_prompt_response import FulfilledExecutePromptResponse
+from .rejected_execute_prompt_response import RejectedExecutePromptResponse
+
+
+class ExecutePromptResponse_Fulfilled(FulfilledExecutePromptResponse):
+    state: typing_extensions.Literal["FULFILLED"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class ExecutePromptResponse_Rejected(RejectedExecutePromptResponse):
+    state: typing_extensions.Literal["REJECTED"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+ExecutePromptResponse = typing.Union[ExecutePromptResponse_Fulfilled, ExecutePromptResponse_Rejected]
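The non-streaming response union follows the same pattern. The sketch below assumes a response object has already been obtained and touches only fields that appear elsewhere in this diff.

from vellum.types import ExecutePromptResponse


def summarize(response: ExecutePromptResponse) -> str:
    if response.state == "FULFILLED":
        # FulfilledExecutePromptResponse defines execution_id, outputs, meta, and raw (see below).
        return f"execution {response.execution_id} returned {len(response.outputs)} output(s)"
    # state == "REJECTED"; the rejected response model's fields are not shown in this diff.
    return "prompt execution was rejected"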
vellum/types/fulfilled_execute_prompt_event.py
ADDED
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .fulfilled_prompt_execution_meta import FulfilledPromptExecutionMeta
+from .prompt_output import PromptOutput
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class FulfilledExecutePromptEvent(pydantic.BaseModel):
+    """
+    The final data event returned indicating that the stream has ended and all final resolved values from the model can be found.
+    """
+
+    outputs: typing.List[PromptOutput]
+    execution_id: str
+    meta: typing.Optional[FulfilledPromptExecutionMeta]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/fulfilled_execute_prompt_response.py
ADDED
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .prompt_execution_meta import PromptExecutionMeta
+from .prompt_output import PromptOutput
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class FulfilledExecutePromptResponse(pydantic.BaseModel):
+    """
+    The successful response from the model containing all of the resolved values generated by the prompt.
+    """
+
+    meta: typing.Optional[PromptExecutionMeta]
+    raw: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
+        description="The subset of the raw response from the model that the request opted into with `expand_raw`."
+    )
+    execution_id: str = pydantic.Field(description="The ID of the execution.")
+    outputs: typing.List[PromptOutput]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/fulfilled_prompt_execution_meta.py
ADDED
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .finish_reason_enum import FinishReasonEnum
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class FulfilledPromptExecutionMeta(pydantic.BaseModel):
+    """
+    The subset of the metadata tracked by Vellum during prompt execution that the request opted into with `expand_meta`.
+    """
+
+    latency: typing.Optional[int]
+    finish_reason: typing.Optional[FinishReasonEnum]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
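A short sketch of reading the opt-in execution metadata from a fulfilled event; meta is optional, and both of its fields are optional too. The event variable is illustrative.

from vellum.types import FulfilledExecutePromptEvent


def log_meta(event: FulfilledExecutePromptEvent) -> None:
    if event.meta is not None:
        print("latency:", event.meta.latency)  # Optional[int]
        print("finish reason:", event.meta.finish_reason)  # Optional[FinishReasonEnum]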
vellum/types/generate_options_request.py
CHANGED
@@ -15,7 +15,7 @@ except ImportError:
 class GenerateOptionsRequest(pydantic.BaseModel):
     logprobs: typing.Optional[LogprobsEnum] = pydantic.Field(
         description=(
-            "Which logprobs to include, if any. Defaults to NONE.\n" "\n" "
+            "Which logprobs to include, if any. Defaults to NONE.\n" "\n" "- `ALL` - ALL\n" "- `NONE` - NONE\n"
        )
    )
 