vellum-ai 0.0.20__py3-none-any.whl → 0.0.22__py3-none-any.whl

Files changed (82)
  1. vellum/__init__.py +36 -20
  2. vellum/client.py +270 -101
  3. vellum/core/__init__.py +11 -2
  4. vellum/core/client_wrapper.py +27 -0
  5. vellum/core/remove_none_from_dict.py +11 -0
  6. vellum/resources/deployments/client.py +35 -15
  7. vellum/resources/document_indexes/client.py +64 -16
  8. vellum/resources/documents/client.py +110 -35
  9. vellum/resources/model_versions/client.py +67 -25
  10. vellum/resources/registered_prompts/client.py +80 -16
  11. vellum/resources/sandboxes/client.py +90 -25
  12. vellum/resources/test_suites/client.py +90 -25
  13. vellum/types/__init__.py +22 -4
  14. vellum/types/deployment_read.py +2 -6
  15. vellum/types/document.py +3 -7
  16. vellum/types/document_document_to_document_index.py +2 -2
  17. vellum/types/document_index_read.py +3 -7
  18. vellum/types/enriched_normalized_completion.py +5 -9
  19. vellum/types/evaluation_params.py +1 -3
  20. vellum/types/evaluation_params_request.py +1 -3
  21. vellum/types/generate_error_response.py +1 -1
  22. vellum/types/generate_request.py +3 -7
  23. vellum/types/generate_result.py +2 -6
  24. vellum/types/generate_result_data.py +1 -1
  25. vellum/types/generate_result_error.py +1 -1
  26. vellum/types/model_version_build_config.py +2 -6
  27. vellum/types/model_version_compile_prompt_response.py +1 -1
  28. vellum/types/model_version_compiled_prompt.py +2 -4
  29. vellum/types/model_version_exec_config.py +3 -3
  30. vellum/types/model_version_read.py +7 -10
  31. vellum/types/model_version_sandbox_snapshot.py +3 -5
  32. vellum/types/prompt_template_block_properties_request.py +2 -2
  33. vellum/types/prompt_template_block_request.py +1 -1
  34. vellum/types/prompt_template_input_variable.py +1 -1
  35. vellum/types/prompt_template_input_variable_request.py +1 -1
  36. vellum/types/provider_enum.py +5 -0
  37. vellum/types/register_prompt_error_response.py +1 -1
  38. vellum/types/register_prompt_prompt.py +2 -2
  39. vellum/types/register_prompt_prompt_info_request.py +1 -1
  40. vellum/types/register_prompt_response.py +5 -7
  41. vellum/types/registered_prompt_deployment.py +3 -3
  42. vellum/types/registered_prompt_model_version.py +2 -2
  43. vellum/types/registered_prompt_sandbox.py +2 -2
  44. vellum/types/registered_prompt_sandbox_snapshot.py +1 -1
  45. vellum/types/sandbox_scenario.py +2 -2
  46. vellum/types/scenario_input_request.py +1 -1
  47. vellum/types/search_error_response.py +1 -1
  48. vellum/types/search_filters_request.py +1 -1
  49. vellum/types/search_request_options_request.py +4 -6
  50. vellum/types/search_response.py +1 -1
  51. vellum/types/search_result.py +3 -3
  52. vellum/types/search_result_merging_request.py +1 -1
  53. vellum/types/search_weights_request.py +2 -2
  54. vellum/types/slim_document.py +5 -9
  55. vellum/types/submit_completion_actual_request.py +5 -15
  56. vellum/types/terminal_node_chat_history_result.py +1 -1
  57. vellum/types/terminal_node_json_result.py +1 -1
  58. vellum/types/terminal_node_result_output.py +2 -4
  59. vellum/types/terminal_node_string_result.py +1 -1
  60. vellum/types/test_suite_test_case.py +4 -8
  61. vellum/types/upload_document_response.py +1 -1
  62. vellum/types/workflow_event_error.py +26 -0
  63. vellum/types/workflow_execution_event_error_code.py +24 -0
  64. vellum/types/workflow_node_result_data.py +7 -11
  65. vellum/types/workflow_node_result_event.py +4 -3
  66. vellum/types/{workflow_node_result_event_state_enum.py → workflow_node_result_event_state.py} +5 -5
  67. vellum/types/workflow_request_chat_history_input_request.py +1 -3
  68. vellum/types/workflow_request_input_request.py +2 -6
  69. vellum/types/workflow_request_json_input_request.py +1 -3
  70. vellum/types/workflow_request_string_input_request.py +1 -3
  71. vellum/types/workflow_result_event.py +6 -3
  72. vellum/types/workflow_result_event_output_data.py +40 -0
  73. vellum/types/workflow_result_event_output_data_chat_history.py +32 -0
  74. vellum/types/workflow_result_event_output_data_json.py +31 -0
  75. vellum/types/workflow_result_event_output_data_string.py +33 -0
  76. vellum/types/workflow_stream_event.py +1 -4
  77. {vellum_ai-0.0.20.dist-info → vellum_ai-0.0.22.dist-info}/METADATA +1 -1
  78. vellum_ai-0.0.22.dist-info/RECORD +148 -0
  79. vellum/core/remove_none_from_headers.py +0 -11
  80. vellum/types/workflow_result_event_state_enum.py +0 -31
  81. vellum_ai-0.0.20.dist-info/RECORD +0 -142
  82. {vellum_ai-0.0.20.dist-info → vellum_ai-0.0.22.dist-info}/WHEEL +0 -0
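Note on the core helpers: the list above drops vellum/core/remove_none_from_headers.py in favor of a new, more general vellum/core/remove_none_from_dict.py (alongside the new vellum/core/client_wrapper.py). The new helper's body is not included in the hunks below; the following is only a minimal sketch of what a None-stripping dict helper with that name plausibly does, not the actual implementation.

```python
# Hypothetical sketch only; the real vellum/core/remove_none_from_dict.py is not shown in this diff.
from typing import Any, Dict, Optional


def remove_none_from_dict(original: Dict[str, Optional[Any]]) -> Dict[str, Any]:
    """Return a copy of `original` with every key whose value is None removed."""
    return {key: value for key, value in original.items() if value is not None}
```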
vellum/types/document_index_read.py
@@ -14,14 +14,10 @@ class DocumentIndexRead(pydantic.BaseModel):
     id: str
     created: str
     label: str = pydantic.Field(
-        description=(
-            'A human-readable label for the document index <span style="white-space: nowrap">`<= 150 characters`</span> \n'
-        )
+        description='A human-readable label for the document index <span style="white-space: nowrap">`<= 150 characters`</span> '
     )
     name: str = pydantic.Field(
-        description=(
-            'A name that uniquely identifies this index within its workspace <span style="white-space: nowrap">`<= 150 characters`</span> \n'
-        )
+        description='A name that uniquely identifies this index within its workspace <span style="white-space: nowrap">`<= 150 characters`</span> '
     )
     status: typing.Optional[DocumentIndexStatus] = pydantic.Field(
         description=(
@@ -38,7 +34,7 @@ class DocumentIndexRead(pydantic.BaseModel):
         )
     )
     indexing_config: typing.Dict[str, typing.Any] = pydantic.Field(
-        description=("Configuration representing how documents should be indexed\n")
+        description="Configuration representing how documents should be indexed"
     )
 
     def json(self, **kwargs: typing.Any) -> str:
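The same mechanical change repeats across most of the type files below: pydantic.Field descriptions written as a parenthesized (sometimes multi-line) string literal ending in "\n" are collapsed to a single string literal, dropping the trailing newline. A short illustration of the net effect on the description value, using a string taken from the hunk above:

```python
# The parenthesized form is just an ordinary string expression, so the only
# functional difference after this change is the missing trailing newline.
old = (
    "Configuration representing how documents should be indexed\n"
)
new = "Configuration representing how documents should be indexed"

assert old.rstrip("\n") == new
```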
vellum/types/enriched_normalized_completion.py
@@ -12,13 +12,11 @@ from .normalized_log_probs import NormalizedLogProbs
 
 
 class EnrichedNormalizedCompletion(pydantic.BaseModel):
-    id: str = pydantic.Field(description=("The Vellum-generated ID of the completion.\n"))
+    id: str = pydantic.Field(description="The Vellum-generated ID of the completion.")
     external_id: typing.Optional[str] = pydantic.Field(
-        description=(
-            "The external ID that was originally provided along with the generation request, which uniquely identifies this generation in an external system.\n"
-        )
+        description="The external ID that was originally provided along with the generation request, which uniquely identifies this generation in an external system."
     )
-    text: str = pydantic.Field(description=("The text generated by the LLM.\n"))
+    text: str = pydantic.Field(description="The text generated by the LLM.")
     finish_reason: typing.Optional[FinishReasonEnum] = pydantic.Field(
         description=(
             "The reason the generation finished.\n"
@@ -29,11 +27,9 @@ class EnrichedNormalizedCompletion(pydantic.BaseModel):
         )
     )
     logprobs: typing.Optional[NormalizedLogProbs] = pydantic.Field(
-        description=("The logprobs of the completion. Only present if specified in the original request options.\n")
-    )
-    model_version_id: str = pydantic.Field(
-        description=("The ID of the model version used to generate this completion.\n")
+        description="The logprobs of the completion. Only present if specified in the original request options."
     )
+    model_version_id: str = pydantic.Field(description="The ID of the model version used to generate this completion.")
     type: typing.Optional[ContentType]
 
     def json(self, **kwargs: typing.Any) -> str:
vellum/types/evaluation_params.py
@@ -10,9 +10,7 @@ from ..core.datetime_utils import serialize_datetime
 
 class EvaluationParams(pydantic.BaseModel):
     target: typing.Optional[str] = pydantic.Field(
-        description=(
-            "The target value to compare the LLM output against. Typically what you expect or desire the LLM output to be.\n"
-        )
+        description="The target value to compare the LLM output against. Typically what you expect or desire the LLM output to be."
     )
 
     def json(self, **kwargs: typing.Any) -> str:

vellum/types/evaluation_params_request.py
@@ -10,9 +10,7 @@ from ..core.datetime_utils import serialize_datetime
 
 class EvaluationParamsRequest(pydantic.BaseModel):
     target: typing.Optional[str] = pydantic.Field(
-        description=(
-            "The target value to compare the LLM output against. Typically what you expect or desire the LLM output to be.\n"
-        )
+        description="The target value to compare the LLM output against. Typically what you expect or desire the LLM output to be."
     )
 
     def json(self, **kwargs: typing.Any) -> str:

vellum/types/generate_error_response.py
@@ -9,7 +9,7 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class GenerateErrorResponse(pydantic.BaseModel):
-    detail: str = pydantic.Field(description=("Details about why the request failed.\n"))
+    detail: str = pydantic.Field(description="Details about why the request failed.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
vellum/types/generate_request.py
@@ -11,17 +11,13 @@ from .chat_message_request import ChatMessageRequest
 
 class GenerateRequest(pydantic.BaseModel):
     input_values: typing.Dict[str, typing.Any] = pydantic.Field(
-        description=("Key/value pairs for each template variable defined in the deployment's prompt.\n")
+        description="Key/value pairs for each template variable defined in the deployment's prompt."
     )
     chat_history: typing.Optional[typing.List[ChatMessageRequest]] = pydantic.Field(
-        description=(
-            "Optionally provide a list of chat messages that'll be used in place of the special {$chat_history} variable, if included in the prompt.\n"
-        )
+        description="Optionally provide a list of chat messages that'll be used in place of the special {$chat_history} variable, if included in the prompt."
     )
     external_ids: typing.Optional[typing.List[str]] = pydantic.Field(
-        description=(
-            "Optionally include a unique identifier for each generation, as represented outside of Vellum. Note that this should generally be a list of length one.\n"
-        )
+        description="Optionally include a unique identifier for each generation, as represented outside of Vellum. Note that this should generally be a list of length one."
     )
 
     def json(self, **kwargs: typing.Any) -> str:

vellum/types/generate_result.py
@@ -12,14 +12,10 @@ from .generate_result_error import GenerateResultError
 
 class GenerateResult(pydantic.BaseModel):
     data: typing.Optional[GenerateResultData] = pydantic.Field(
-        description=(
-            "An object containing the resulting generation. This key will be absent if the LLM provider experienced an error.\n"
-        )
+        description="An object containing the resulting generation. This key will be absent if the LLM provider experienced an error."
     )
     error: typing.Optional[GenerateResultError] = pydantic.Field(
-        description=(
-            "An object containing details about the error that occurred. This key will be absent if the LLM provider did not experience an error.\n"
-        )
+        description="An object containing details about the error that occurred. This key will be absent if the LLM provider did not experience an error."
     )
 
     def json(self, **kwargs: typing.Any) -> str:

vellum/types/generate_result_data.py
@@ -11,7 +11,7 @@ from .enriched_normalized_completion import EnrichedNormalizedCompletion
 
 class GenerateResultData(pydantic.BaseModel):
     completions: typing.List[EnrichedNormalizedCompletion] = pydantic.Field(
-        description=("The generated completions. This will generally be a list of length one.\n")
+        description="The generated completions. This will generally be a list of length one."
     )
 
     def json(self, **kwargs: typing.Any) -> str:
vellum/types/generate_result_error.py
@@ -9,7 +9,7 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class GenerateResultError(pydantic.BaseModel):
-    message: str = pydantic.Field(description=("The error message returned by the LLM provider.\n"))
+    message: str = pydantic.Field(description="The error message returned by the LLM provider.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/model_version_build_config.py
@@ -11,14 +11,10 @@ from .model_version_sandbox_snapshot import ModelVersionSandboxSnapshot
 
 class ModelVersionBuildConfig(pydantic.BaseModel):
     base_model: str = pydantic.Field(
-        description=(
-            "The name of the base model used to create this model version, as identified by the LLM provider.\n"
-        )
+        description="The name of the base model used to create this model version, as identified by the LLM provider."
     )
     sandbox_snapshot: typing.Optional[ModelVersionSandboxSnapshot] = pydantic.Field(
-        description=(
-            "Information about the sandbox snapshot that was used to create this model version, if applicable.\n"
-        )
+        description="Information about the sandbox snapshot that was used to create this model version, if applicable."
     )
 
     def json(self, **kwargs: typing.Any) -> str:

vellum/types/model_version_compile_prompt_response.py
@@ -10,7 +10,7 @@ from .model_version_compiled_prompt import ModelVersionCompiledPrompt
 
 
 class ModelVersionCompilePromptResponse(pydantic.BaseModel):
-    prompt: ModelVersionCompiledPrompt = pydantic.Field(description=("Information about the compiled prompt.\n"))
+    prompt: ModelVersionCompiledPrompt = pydantic.Field(description="Information about the compiled prompt.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
vellum/types/model_version_compiled_prompt.py
@@ -10,11 +10,9 @@ from ..core.datetime_utils import serialize_datetime
 
 class ModelVersionCompiledPrompt(pydantic.BaseModel):
     text: str = pydantic.Field(
-        description=(
-            "The fully compiled prompt in normalized ChatML syntax after all variable substitutions and templating functions are applied.\n"
-        )
+        description="The fully compiled prompt in normalized ChatML syntax after all variable substitutions and templating functions are applied."
     )
-    num_tokens: int = pydantic.Field(description=("The approximate number of tokens used by the compiled prompt.\n"))
+    num_tokens: int = pydantic.Field(description="The approximate number of tokens used by the compiled prompt.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/model_version_exec_config.py
@@ -13,13 +13,13 @@ from .prompt_template_input_variable import PromptTemplateInputVariable
 
 class ModelVersionExecConfig(pydantic.BaseModel):
     parameters: ModelVersionExecConfigParameters = pydantic.Field(
-        description=("The generation parameters that are passed to the LLM provider at runtime.\n")
+        description="The generation parameters that are passed to the LLM provider at runtime."
     )
     input_variables: typing.List[PromptTemplateInputVariable] = pydantic.Field(
-        description=("Names of the input variables specified in the prompt template.\n")
+        description="Names of the input variables specified in the prompt template."
     )
     prompt_template: typing.Optional[str] = pydantic.Field(
-        description=("The template used to generate prompts for this model version.\n")
+        description="The template used to generate prompts for this model version."
     )
     prompt_block_data: typing.Optional[PromptTemplateBlockData]
     prompt_syntax_version: typing.Optional[int]
vellum/types/model_version_read.py
@@ -14,12 +14,10 @@ from .provider_enum import ProviderEnum
 
 
 class ModelVersionRead(pydantic.BaseModel):
-    id: str = pydantic.Field(description=("Vellum-generated ID that uniquely identifies this model version.\n"))
-    created: str = pydantic.Field(description=("Timestamp of when this model version was created.\n"))
+    id: str = pydantic.Field(description="Vellum-generated ID that uniquely identifies this model version.")
+    created: str = pydantic.Field(description="Timestamp of when this model version was created.")
     label: str = pydantic.Field(
-        description=(
-            'Human-friendly name for this model version. <span style="white-space: nowrap">`<= 150 characters`</span> \n'
-        )
+        description='Human-friendly name for this model version. <span style="white-space: nowrap">`<= 150 characters`</span> '
     )
     model_type: ModelTypeEnum = pydantic.Field(
         description=(
@@ -35,20 +33,19 @@ class ModelVersionRead(pydantic.BaseModel):
             "* `GOOGLE` - Google\n"
             "* `HOSTED` - Hosted\n"
             "* `MOSAICML` - MosaicML\n"
+            "* `MYSTIC` - Mystic\n"
             "* `OPENAI` - OpenAI\n"
             "* `PYQ` - Pyq\n"
         )
     )
     external_id: str = pydantic.Field(
-        description=(
-            'The unique id of this model version as it exists in the above provider\'s system. <span style="white-space: nowrap">`<= 250 characters`</span> \n'
-        )
+        description='The unique id of this model version as it exists in the above provider\'s system. <span style="white-space: nowrap">`<= 250 characters`</span> '
     )
     build_config: ModelVersionBuildConfig = pydantic.Field(
-        description=("Configuration used to build this model version.\n")
+        description="Configuration used to build this model version."
     )
     exec_config: ModelVersionExecConfig = pydantic.Field(
-        description=("Configuration used to execute this model version.\n")
+        description="Configuration used to execute this model version."
     )
     status: typing.Optional[ModelVersionReadStatusEnum]
 

vellum/types/model_version_sandbox_snapshot.py
@@ -9,11 +9,9 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class ModelVersionSandboxSnapshot(pydantic.BaseModel):
-    id: str = pydantic.Field(description=("The ID of the sandbox snapshot.\n"))
-    prompt_index: typing.Optional[int] = pydantic.Field(
-        description=("The index of the prompt in the sandbox snapshot.\n")
-    )
-    prompt_id: typing.Optional[str] = pydantic.Field(description=("The id of the prompt in the sandbox snapshot.\n"))
+    id: str = pydantic.Field(description="The ID of the sandbox snapshot.")
+    prompt_index: typing.Optional[int] = pydantic.Field(description="The index of the prompt in the sandbox snapshot.")
+    prompt_id: typing.Optional[str] = pydantic.Field(description="The id of the prompt in the sandbox snapshot.")
     sandbox_id: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
vellum/types/prompt_template_block_properties_request.py
@@ -18,10 +18,10 @@ class PromptTemplateBlockPropertiesRequest(pydantic.BaseModel):
     template: typing.Optional[str]
     template_type: typing.Optional[ContentType]
     function_name: typing.Optional[str] = pydantic.Field(
-        description=('<span style="white-space: nowrap">`non-empty`</span>\n')
+        description='<span style="white-space: nowrap">`non-empty`</span>'
     )
     function_description: typing.Optional[str] = pydantic.Field(
-        description=('<span style="white-space: nowrap">`non-empty`</span>\n')
+        description='<span style="white-space: nowrap">`non-empty`</span>'
     )
     function_parameters: typing.Optional[typing.Dict[str, typing.Any]]
     blocks: typing.Optional[typing.List[PromptTemplateBlockRequest]]

vellum/types/prompt_template_block_request.py
@@ -12,7 +12,7 @@ from .block_type_enum import BlockTypeEnum
 
 
 class PromptTemplateBlockRequest(pydantic.BaseModel):
-    id: str = pydantic.Field(description=('<span style="white-space: nowrap">`non-empty`</span>\n'))
+    id: str = pydantic.Field(description='<span style="white-space: nowrap">`non-empty`</span>')
     block_type: BlockTypeEnum
     properties: PromptTemplateBlockPropertiesRequest
 

vellum/types/prompt_template_input_variable.py
@@ -9,7 +9,7 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class PromptTemplateInputVariable(pydantic.BaseModel):
-    key: str = pydantic.Field(description=("The name of the input variable.\n"))
+    key: str = pydantic.Field(description="The name of the input variable.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/prompt_template_input_variable_request.py
@@ -10,7 +10,7 @@ from ..core.datetime_utils import serialize_datetime
 
 class PromptTemplateInputVariableRequest(pydantic.BaseModel):
     key: str = pydantic.Field(
-        description=('The name of the input variable. <span style="white-space: nowrap">`non-empty`</span> \n')
+        description='The name of the input variable. <span style="white-space: nowrap">`non-empty`</span> '
    )
 
     def json(self, **kwargs: typing.Any) -> str:
vellum/types/provider_enum.py
@@ -13,6 +13,7 @@ class ProviderEnum(str, enum.Enum):
     * `GOOGLE` - Google
     * `HOSTED` - Hosted
     * `MOSAICML` - MosaicML
+    * `MYSTIC` - Mystic
     * `OPENAI` - OpenAI
     * `PYQ` - Pyq
     """
@@ -22,6 +23,7 @@ class ProviderEnum(str, enum.Enum):
     GOOGLE = "GOOGLE"
     HOSTED = "HOSTED"
     MOSAICML = "MOSAICML"
+    MYSTIC = "MYSTIC"
     OPENAI = "OPENAI"
     PYQ = "PYQ"
 
@@ -32,6 +34,7 @@ class ProviderEnum(str, enum.Enum):
         google: typing.Callable[[], T_Result],
         hosted: typing.Callable[[], T_Result],
         mosaicml: typing.Callable[[], T_Result],
+        mystic: typing.Callable[[], T_Result],
         openai: typing.Callable[[], T_Result],
         pyq: typing.Callable[[], T_Result],
     ) -> T_Result:
@@ -45,6 +48,8 @@ class ProviderEnum(str, enum.Enum):
             return hosted()
         if self is ProviderEnum.MOSAICML:
            return mosaicml()
+        if self is ProviderEnum.MYSTIC:
+            return mystic()
         if self is ProviderEnum.OPENAI:
             return openai()
         if self is ProviderEnum.PYQ:
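The hunks above add a MYSTIC member to ProviderEnum (mirrored in the ModelVersionRead docstring earlier). A minimal sketch of handling the new value follows; note that any caller using the exhaustive callable-per-member method whose parameters are shown above must now also supply a mystic handler.

```python
from vellum.types.provider_enum import ProviderEnum

provider = ProviderEnum.MYSTIC

# ProviderEnum subclasses str, so both identity and string comparison work.
if provider is ProviderEnum.MYSTIC:
    print("Routing request to Mystic")
assert provider == "MYSTIC"
```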
vellum/types/register_prompt_error_response.py
@@ -9,7 +9,7 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class RegisterPromptErrorResponse(pydantic.BaseModel):
-    detail: str = pydantic.Field(description=("Details about why the request failed.\n"))
+    detail: str = pydantic.Field(description="Details about why the request failed.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/register_prompt_prompt.py
@@ -9,8 +9,8 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class RegisterPromptPrompt(pydantic.BaseModel):
-    id: str = pydantic.Field(description=("The ID of the generated prompt.\n"))
-    label: str = pydantic.Field(description=("A human-friendly label for the generated prompt.\n"))
+    id: str = pydantic.Field(description="The ID of the generated prompt.")
+    label: str = pydantic.Field(description="A human-friendly label for the generated prompt.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/register_prompt_prompt_info_request.py
@@ -14,7 +14,7 @@ class RegisterPromptPromptInfoRequest(pydantic.BaseModel):
     prompt_syntax_version: typing.Optional[int]
     prompt_block_data: PromptTemplateBlockDataRequest
     input_variables: typing.List[PromptTemplateInputVariableRequest] = pydantic.Field(
-        description=("Names of the input variables specified in the prompt template.\n")
+        description="Names of the input variables specified in the prompt template."
     )
 
     def json(self, **kwargs: typing.Any) -> str:
vellum/types/register_prompt_response.py
@@ -14,17 +14,15 @@ from .registered_prompt_sandbox_snapshot import RegisteredPromptSandboxSnapshot
 
 
 class RegisterPromptResponse(pydantic.BaseModel):
-    prompt: RegisterPromptPrompt = pydantic.Field(description=("Information about the generated prompt\n"))
+    prompt: RegisterPromptPrompt = pydantic.Field(description="Information about the generated prompt")
     sandbox_snapshot: RegisteredPromptSandboxSnapshot = pydantic.Field(
-        description=("Information about the generated sandbox snapshot\n")
+        description="Information about the generated sandbox snapshot"
     )
-    sandbox: RegisteredPromptSandbox = pydantic.Field(description=("Information about the generated sandbox\n"))
+    sandbox: RegisteredPromptSandbox = pydantic.Field(description="Information about the generated sandbox")
     model_version: RegisteredPromptModelVersion = pydantic.Field(
-        description=("Information about the generated model version\n")
-    )
-    deployment: RegisteredPromptDeployment = pydantic.Field(
-        description=("Information about the generated deployment\n")
+        description="Information about the generated model version"
     )
+    deployment: RegisteredPromptDeployment = pydantic.Field(description="Information about the generated deployment")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/registered_prompt_deployment.py
@@ -9,9 +9,9 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class RegisteredPromptDeployment(pydantic.BaseModel):
-    id: str = pydantic.Field(description=("The ID of the generated deployment.\n"))
-    name: str = pydantic.Field(description=("A uniquely-identifying name for generated deployment.\n"))
-    label: str = pydantic.Field(description=("A human-friendly label for the generated deployment.\n"))
+    id: str = pydantic.Field(description="The ID of the generated deployment.")
+    name: str = pydantic.Field(description="A uniquely-identifying name for generated deployment.")
+    label: str = pydantic.Field(description="A human-friendly label for the generated deployment.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
vellum/types/registered_prompt_model_version.py
@@ -9,8 +9,8 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class RegisteredPromptModelVersion(pydantic.BaseModel):
-    id: str = pydantic.Field(description=("The ID of the generated model version.\n"))
-    label: str = pydantic.Field(description=("A human-friendly label for the generated model version.\n"))
+    id: str = pydantic.Field(description="The ID of the generated model version.")
+    label: str = pydantic.Field(description="A human-friendly label for the generated model version.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/registered_prompt_sandbox.py
@@ -9,8 +9,8 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class RegisteredPromptSandbox(pydantic.BaseModel):
-    id: str = pydantic.Field(description=("The ID of the generated sandbox.\n"))
-    label: str = pydantic.Field(description=("A human-friendly label for the generated sandbox.\n"))
+    id: str = pydantic.Field(description="The ID of the generated sandbox.")
+    label: str = pydantic.Field(description="A human-friendly label for the generated sandbox.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/registered_prompt_sandbox_snapshot.py
@@ -9,7 +9,7 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class RegisteredPromptSandboxSnapshot(pydantic.BaseModel):
-    id: str = pydantic.Field(description=("The ID of the generated sandbox snapshot.\n"))
+    id: str = pydantic.Field(description="The ID of the generated sandbox snapshot.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
vellum/types/sandbox_scenario.py
@@ -12,8 +12,8 @@ from .scenario_input import ScenarioInput
 
 class SandboxScenario(pydantic.BaseModel):
     label: typing.Optional[str]
-    inputs: typing.List[ScenarioInput] = pydantic.Field(description=("The inputs for the scenario\n"))
-    id: str = pydantic.Field(description=("The id of the scenario\n"))
+    inputs: typing.List[ScenarioInput] = pydantic.Field(description="The inputs for the scenario")
+    id: str = pydantic.Field(description="The id of the scenario")
     metric_input_params: SandboxMetricInputParams
 
     def json(self, **kwargs: typing.Any) -> str:

vellum/types/scenario_input_request.py
@@ -11,7 +11,7 @@ from .scenario_input_type_enum import ScenarioInputTypeEnum
 
 
 class ScenarioInputRequest(pydantic.BaseModel):
-    key: str = pydantic.Field(description=('<span style="white-space: nowrap">`non-empty`</span>\n'))
+    key: str = pydantic.Field(description='<span style="white-space: nowrap">`non-empty`</span>')
     type: typing.Optional[ScenarioInputTypeEnum]
     value: typing.Optional[str]
     chat_history: typing.Optional[typing.List[ChatMessageRequest]]

vellum/types/search_error_response.py
@@ -9,7 +9,7 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class SearchErrorResponse(pydantic.BaseModel):
-    detail: str = pydantic.Field(description=("Details about why the request failed.\n"))
+    detail: str = pydantic.Field(description="Details about why the request failed.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
vellum/types/search_filters_request.py
@@ -10,7 +10,7 @@ from ..core.datetime_utils import serialize_datetime
 
 class SearchFiltersRequest(pydantic.BaseModel):
     external_ids: typing.Optional[typing.List[str]] = pydantic.Field(
-        description=("The document external IDs to filter by\n")
+        description="The document external IDs to filter by"
     )
 
     def json(self, **kwargs: typing.Any) -> str:

vellum/types/search_request_options_request.py
@@ -12,16 +12,14 @@ from .search_weights_request import SearchWeightsRequest
 
 
 class SearchRequestOptionsRequest(pydantic.BaseModel):
-    limit: typing.Optional[int] = pydantic.Field(description=("The maximum number of results to return.\n"))
+    limit: typing.Optional[int] = pydantic.Field(description="The maximum number of results to return.")
     weights: typing.Optional[SearchWeightsRequest] = pydantic.Field(
-        description=("The weights to use for the search. Must add up to 1.0.\n")
+        description="The weights to use for the search. Must add up to 1.0."
     )
     result_merging: typing.Optional[SearchResultMergingRequest] = pydantic.Field(
-        description=("The configuration for merging results.\n")
-    )
-    filters: typing.Optional[SearchFiltersRequest] = pydantic.Field(
-        description=("The filters to apply to the search.\n")
+        description="The configuration for merging results."
     )
+    filters: typing.Optional[SearchFiltersRequest] = pydantic.Field(description="The filters to apply to the search.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
vellum/types/search_response.py
@@ -11,7 +11,7 @@ from .search_result import SearchResult
 
 class SearchResponse(pydantic.BaseModel):
     results: typing.List[SearchResult] = pydantic.Field(
-        description=("The results of the search. Each result represents a chunk that matches the search query.\n")
+        description="The results of the search. Each result represents a chunk that matches the search query."
     )
 
     def json(self, **kwargs: typing.Any) -> str:

vellum/types/search_result.py
@@ -11,11 +11,11 @@ from .document import Document
 
 class SearchResult(pydantic.BaseModel):
     document: Document = pydantic.Field(
-        description=("The document that contains the chunk that matched the search query.\n")
+        description="The document that contains the chunk that matched the search query."
     )
-    text: str = pydantic.Field(description=("The text of the chunk that matched the search query.\n"))
+    text: str = pydantic.Field(description="The text of the chunk that matched the search query.")
     keywords: typing.List[str]
-    score: float = pydantic.Field(description=("A score representing how well the chunk matches the search query.\n"))
+    score: float = pydantic.Field(description="A score representing how well the chunk matches the search query.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/search_result_merging_request.py
@@ -9,7 +9,7 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class SearchResultMergingRequest(pydantic.BaseModel):
-    enabled: bool = pydantic.Field(description=("Whether to enable merging results\n"))
+    enabled: bool = pydantic.Field(description="Whether to enable merging results")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
vellum/types/search_weights_request.py
@@ -9,8 +9,8 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class SearchWeightsRequest(pydantic.BaseModel):
-    semantic_similarity: float = pydantic.Field(description=("The relative weight to give to semantic similarity\n"))
-    keywords: float = pydantic.Field(description=("The relative weight to give to keyword matches\n"))
+    semantic_similarity: float = pydantic.Field(description="The relative weight to give to semantic similarity")
+    keywords: float = pydantic.Field(description="The relative weight to give to keyword matches")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
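Taken together, the search option models above compose as in the following sketch (field values are illustrative; the weights are chosen to satisfy the "Must add up to 1.0" constraint documented on SearchRequestOptionsRequest.weights):

```python
from vellum.types.search_filters_request import SearchFiltersRequest
from vellum.types.search_request_options_request import SearchRequestOptionsRequest
from vellum.types.search_result_merging_request import SearchResultMergingRequest
from vellum.types.search_weights_request import SearchWeightsRequest

# Illustrative values only.
options = SearchRequestOptionsRequest(
    limit=5,
    weights=SearchWeightsRequest(semantic_similarity=0.8, keywords=0.2),
    result_merging=SearchResultMergingRequest(enabled=True),
    filters=SearchFiltersRequest(external_ids=["example-document-id"]),
)
```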
vellum/types/slim_document.py
@@ -13,17 +13,15 @@ from .slim_document_status_enum import SlimDocumentStatusEnum
 
 
 class SlimDocument(pydantic.BaseModel):
-    id: str = pydantic.Field(description=("Vellum-generated ID that uniquely identifies this document.\n"))
+    id: str = pydantic.Field(description="Vellum-generated ID that uniquely identifies this document.")
     external_id: typing.Optional[str] = pydantic.Field(
-        description=("The external ID that was originally provided when uploading the document.\n")
+        description="The external ID that was originally provided when uploading the document."
     )
     last_uploaded_at: str = pydantic.Field(
-        description=("A timestamp representing when this document was most recently uploaded.\n")
+        description="A timestamp representing when this document was most recently uploaded."
     )
     label: str = pydantic.Field(
-        description=(
-            'Human-friendly name for this document. <span style="white-space: nowrap">`<= 1000 characters`</span> \n'
-        )
+        description='Human-friendly name for this document. <span style="white-space: nowrap">`<= 1000 characters`</span> '
     )
     processing_state: typing.Optional[ProcessingStateEnum] = pydantic.Field(
         description=(
@@ -46,9 +44,7 @@ class SlimDocument(pydantic.BaseModel):
         description=("The document's current status.\n" "\n" "* `ACTIVE` - Active\n")
     )
     keywords: typing.Optional[typing.List[str]] = pydantic.Field(
-        description=(
-            "A list of keywords associated with this document. Originally provided when uploading the document.\n"
-        )
+        description="A list of keywords associated with this document. Originally provided when uploading the document."
     )
     document_to_document_indexes: typing.List[DocumentDocumentToDocumentIndex]