vellum-ai 0.0.29__tar.gz → 0.0.31__tar.gz

Files changed (155)
  1. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/PKG-INFO +1 -1
  2. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/pyproject.toml +2 -2
  3. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/__init__.py +6 -4
  4. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/client.py +8 -4
  5. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/core/client_wrapper.py +5 -1
  6. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/resources/documents/client.py +94 -0
  7. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/__init__.py +6 -4
  8. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/deployment_read.py +2 -2
  9. vellum_ai-0.0.29/src/vellum/types/deployment_read_status_enum.py → vellum_ai-0.0.31/src/vellum/types/deployment_status.py +4 -4
  10. vellum_ai-0.0.31/src/vellum/types/document_read.py +53 -0
  11. vellum_ai-0.0.29/src/vellum/types/slim_document_status_enum.py → vellum_ai-0.0.31/src/vellum/types/document_status.py +1 -1
  12. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/slim_document.py +2 -2
  13. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/terminal_node_chat_history_result.py +1 -1
  14. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/terminal_node_json_result.py +1 -1
  15. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/terminal_node_string_result.py +1 -1
  16. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/README.md +0 -0
  17. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/core/__init__.py +0 -0
  18. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/core/api_error.py +0 -0
  19. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/core/datetime_utils.py +0 -0
  20. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/core/jsonable_encoder.py +0 -0
  21. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/core/remove_none_from_dict.py +0 -0
  22. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/environment.py +0 -0
  23. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/errors/__init__.py +0 -0
  24. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/errors/bad_request_error.py +0 -0
  25. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/errors/conflict_error.py +0 -0
  26. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/errors/forbidden_error.py +0 -0
  27. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/errors/internal_server_error.py +0 -0
  28. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/errors/not_found_error.py +0 -0
  29. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/py.typed +0 -0
  30. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/resources/__init__.py +0 -0
  31. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/resources/deployments/__init__.py +0 -0
  32. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/resources/deployments/client.py +0 -0
  33. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/resources/document_indexes/__init__.py +0 -0
  34. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/resources/document_indexes/client.py +0 -0
  35. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/resources/documents/__init__.py +0 -0
  36. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/resources/model_versions/__init__.py +0 -0
  37. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/resources/model_versions/client.py +0 -0
  38. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/resources/registered_prompts/__init__.py +0 -0
  39. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/resources/registered_prompts/client.py +0 -0
  40. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/resources/sandboxes/__init__.py +0 -0
  41. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/resources/sandboxes/client.py +0 -0
  42. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/resources/test_suites/__init__.py +0 -0
  43. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/resources/test_suites/client.py +0 -0
  44. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/block_type_enum.py +0 -0
  45. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/chat_message.py +0 -0
  46. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/chat_message_request.py +0 -0
  47. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/chat_message_role.py +0 -0
  48. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/conditional_node_result.py +0 -0
  49. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/conditional_node_result_data.py +0 -0
  50. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/content_type.py +0 -0
  51. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/deployment_node_result.py +0 -0
  52. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/deployment_node_result_data.py +0 -0
  53. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/document.py +0 -0
  54. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/document_document_to_document_index.py +0 -0
  55. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/document_index_read.py +0 -0
  56. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/document_index_status.py +0 -0
  57. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/enriched_normalized_completion.py +0 -0
  58. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/environment_enum.py +0 -0
  59. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/evaluation_params.py +0 -0
  60. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/evaluation_params_request.py +0 -0
  61. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/execute_workflow_stream_error_response.py +0 -0
  62. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/finish_reason_enum.py +0 -0
  63. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/generate_error_response.py +0 -0
  64. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/generate_options_request.py +0 -0
  65. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/generate_request.py +0 -0
  66. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/generate_response.py +0 -0
  67. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/generate_result.py +0 -0
  68. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/generate_result_data.py +0 -0
  69. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/generate_result_error.py +0 -0
  70. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/generate_stream_response.py +0 -0
  71. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/generate_stream_result.py +0 -0
  72. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/generate_stream_result_data.py +0 -0
  73. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/indexing_state_enum.py +0 -0
  74. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/input_variable.py +0 -0
  75. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/input_variable_type.py +0 -0
  76. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/logprobs_enum.py +0 -0
  77. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/metadata_filter_config_request.py +0 -0
  78. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/metadata_filter_rule_combinator.py +0 -0
  79. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/metadata_filter_rule_request.py +0 -0
  80. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/model_type_enum.py +0 -0
  81. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/model_version_build_config.py +0 -0
  82. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/model_version_compile_prompt_response.py +0 -0
  83. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/model_version_compiled_prompt.py +0 -0
  84. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/model_version_exec_config.py +0 -0
  85. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/model_version_exec_config_parameters.py +0 -0
  86. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/model_version_read.py +0 -0
  87. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/model_version_read_status_enum.py +0 -0
  88. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/model_version_sandbox_snapshot.py +0 -0
  89. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/normalized_log_probs.py +0 -0
  90. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/normalized_token_log_probs.py +0 -0
  91. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/paginated_slim_document_list.py +0 -0
  92. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/processing_failure_reason_enum.py +0 -0
  93. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/processing_state_enum.py +0 -0
  94. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/prompt_node_result.py +0 -0
  95. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/prompt_node_result_data.py +0 -0
  96. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/prompt_template_block.py +0 -0
  97. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/prompt_template_block_data.py +0 -0
  98. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/prompt_template_block_data_request.py +0 -0
  99. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/prompt_template_block_properties.py +0 -0
  100. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/prompt_template_block_properties_request.py +0 -0
  101. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/prompt_template_block_request.py +0 -0
  102. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/provider_enum.py +0 -0
  103. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/register_prompt_error_response.py +0 -0
  104. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/register_prompt_model_parameters_request.py +0 -0
  105. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/register_prompt_prompt.py +0 -0
  106. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/register_prompt_prompt_info_request.py +0 -0
  107. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/register_prompt_response.py +0 -0
  108. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/registered_prompt_deployment.py +0 -0
  109. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/registered_prompt_input_variable_request.py +0 -0
  110. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/registered_prompt_model_version.py +0 -0
  111. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/registered_prompt_sandbox.py +0 -0
  112. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/registered_prompt_sandbox_snapshot.py +0 -0
  113. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/sandbox_metric_input_params.py +0 -0
  114. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/sandbox_metric_input_params_request.py +0 -0
  115. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/sandbox_node_result.py +0 -0
  116. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/sandbox_node_result_data.py +0 -0
  117. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/sandbox_scenario.py +0 -0
  118. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/scenario_input.py +0 -0
  119. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/scenario_input_request.py +0 -0
  120. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/scenario_input_type_enum.py +0 -0
  121. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/search_error_response.py +0 -0
  122. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/search_filters_request.py +0 -0
  123. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/search_node_result.py +0 -0
  124. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/search_node_result_data.py +0 -0
  125. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/search_request_options_request.py +0 -0
  126. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/search_response.py +0 -0
  127. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/search_result.py +0 -0
  128. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/search_result_merging_request.py +0 -0
  129. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/search_weights_request.py +0 -0
  130. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/submit_completion_actual_request.py +0 -0
  131. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/submit_completion_actuals_error_response.py +0 -0
  132. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/terminal_node_result.py +0 -0
  133. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/terminal_node_result_data.py +0 -0
  134. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/terminal_node_result_output.py +0 -0
  135. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/test_suite_test_case.py +0 -0
  136. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/upload_document_error_response.py +0 -0
  137. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/upload_document_response.py +0 -0
  138. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/workflow_event_error.py +0 -0
  139. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/workflow_execution_event_error_code.py +0 -0
  140. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/workflow_execution_event_type.py +0 -0
  141. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/workflow_execution_node_result_event.py +0 -0
  142. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/workflow_execution_workflow_result_event.py +0 -0
  143. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/workflow_node_result_data.py +0 -0
  144. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/workflow_node_result_event.py +0 -0
  145. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/workflow_node_result_event_state.py +0 -0
  146. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/workflow_request_chat_history_input_request.py +0 -0
  147. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/workflow_request_input_request.py +0 -0
  148. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/workflow_request_json_input_request.py +0 -0
  149. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/workflow_request_string_input_request.py +0 -0
  150. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/workflow_result_event.py +0 -0
  151. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/workflow_result_event_output_data.py +0 -0
  152. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/workflow_result_event_output_data_chat_history.py +0 -0
  153. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/workflow_result_event_output_data_json.py +0 -0
  154. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/workflow_result_event_output_data_string.py +0 -0
  155. {vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/workflow_stream_event.py +0 -0
{vellum_ai-0.0.29 → vellum_ai-0.0.31}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: vellum-ai
- Version: 0.0.29
+ Version: 0.0.31
  Summary:
  Requires-Python: >=3.7,<4.0
  Classifier: Programming Language :: Python :: 3
{vellum_ai-0.0.29 → vellum_ai-0.0.31}/pyproject.toml
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "vellum-ai"
- version = "v0.0.29"
+ version = "v0.0.31"
  description = ""
  readme = "README.md"
  authors = []
@@ -10,8 +10,8 @@ packages = [

  [tool.poetry.dependencies]
  python = "^3.7"
- pydantic = "^1.9.2"
  httpx = "0.23.3"
+ pydantic = "^1.9.2"

  [tool.poetry.dev-dependencies]
  mypy = "0.971"
{vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/__init__.py
@@ -11,11 +11,13 @@ from .types import (
  DeploymentNodeResult,
  DeploymentNodeResultData,
  DeploymentRead,
- DeploymentReadStatusEnum,
+ DeploymentStatus,
  Document,
  DocumentDocumentToDocumentIndex,
  DocumentIndexRead,
  DocumentIndexStatus,
+ DocumentRead,
+ DocumentStatus,
  EnrichedNormalizedCompletion,
  EnvironmentEnum,
  EvaluationParams,
@@ -90,7 +92,6 @@ from .types import (
  SearchResultMergingRequest,
  SearchWeightsRequest,
  SlimDocument,
- SlimDocumentStatusEnum,
  SubmitCompletionActualRequest,
  SubmitCompletionActualsErrorResponse,
  TerminalNodeChatHistoryResult,
@@ -163,11 +164,13 @@ __all__ = [
  "DeploymentNodeResult",
  "DeploymentNodeResultData",
  "DeploymentRead",
- "DeploymentReadStatusEnum",
+ "DeploymentStatus",
  "Document",
  "DocumentDocumentToDocumentIndex",
  "DocumentIndexRead",
  "DocumentIndexStatus",
+ "DocumentRead",
+ "DocumentStatus",
  "EnrichedNormalizedCompletion",
  "EnvironmentEnum",
  "EvaluationParams",
@@ -245,7 +248,6 @@ __all__ = [
  "SearchResultMergingRequest",
  "SearchWeightsRequest",
  "SlimDocument",
- "SlimDocumentStatusEnum",
  "SubmitCompletionActualRequest",
  "SubmitCompletionActualsErrorResponse",
  "TerminalNodeChatHistoryResult",
{vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/client.py
@@ -109,12 +109,14 @@ class Vellum:
  continue
  yield pydantic.parse_obj_as(WorkflowStreamEvent, json.loads(_text)) # type: ignore
  return
+ _response.read()
+ if _response.status_code == 400:
+ raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
  if _response.status_code == 404:
  raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
  if _response.status_code == 500:
  raise InternalServerError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
  try:
- _response.read()
  _response_json = _response.json()
  except JSONDecodeError:
  raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -218,6 +220,7 @@ class Vellum:
  continue
  yield pydantic.parse_obj_as(GenerateStreamResponse, json.loads(_text)) # type: ignore
  return
+ _response.read()
  if _response.status_code == 400:
  raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
  if _response.status_code == 403:
@@ -227,7 +230,6 @@ class Vellum:
  if _response.status_code == 500:
  raise InternalServerError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
  try:
- _response.read()
  _response_json = _response.json()
  except JSONDecodeError:
  raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -405,12 +407,14 @@ class AsyncVellum:
  continue
  yield pydantic.parse_obj_as(WorkflowStreamEvent, json.loads(_text)) # type: ignore
  return
+ await _response.aread()
+ if _response.status_code == 400:
+ raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
  if _response.status_code == 404:
  raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
  if _response.status_code == 500:
  raise InternalServerError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
  try:
- await _response.aread()
  _response_json = _response.json()
  except JSONDecodeError:
  raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -514,6 +518,7 @@ class AsyncVellum:
  continue
  yield pydantic.parse_obj_as(GenerateStreamResponse, json.loads(_text)) # type: ignore
  return
+ await _response.aread()
  if _response.status_code == 400:
  raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
  if _response.status_code == 403:
@@ -523,7 +528,6 @@ class AsyncVellum:
  if _response.status_code == 500:
  raise InternalServerError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
  try:
- await _response.aread()
  _response_json = _response.json()
  except JSONDecodeError:
  raise ApiError(status_code=_response.status_code, body=_response.text)
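The hunks above move the response-buffering call (`_response.read()` / `await _response.aread()`) ahead of the status checks and add explicit 400 handling to the streaming endpoints, so a malformed request now raises BadRequestError instead of falling through to the generic JSON-decode path. A minimal caller-side sketch, assuming the SDK's generate_stream() method and GenerateRequest shape from its other modules; the deployment name and inputs are placeholders:

from vellum import GenerateRequest
from vellum.client import Vellum
from vellum.core.api_error import ApiError
from vellum.errors.bad_request_error import BadRequestError

client = Vellum(api_key="YOUR_API_KEY")  # assumption: api_key-only construction

try:
    # generate_stream() is a generator, so errors surface while iterating.
    for event in client.generate_stream(
        deployment_name="my-deployment",  # placeholder
        requests=[GenerateRequest(input_values={"query": "hi"})],  # placeholder inputs
    ):
        print(event)
except BadRequestError as exc:
    # New in 0.0.31: a 400 from the streaming endpoint is raised as BadRequestError.
    print("bad request:", exc.body)
except ApiError as exc:
    print("unexpected API error:", exc.status_code, exc.body)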
{vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/core/client_wrapper.py
@@ -10,7 +10,11 @@ class BaseClientWrapper:
  self.api_key = api_key

  def get_headers(self) -> typing.Dict[str, str]:
- headers: typing.Dict[str, str] = {}
+ headers: typing.Dict[str, str] = {
+ "X-Fern-Language": "Python",
+ "X-Fern-SDK-Name": "vellum-ai",
+ "X-Fern-SDK-Version": "v0.0.31",
+ }
  headers["X_API_KEY"] = self.api_key
  return headers

{vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/resources/documents/client.py
@@ -14,6 +14,8 @@ from ...environment import VellumEnvironment
  from ...errors.bad_request_error import BadRequestError
  from ...errors.internal_server_error import InternalServerError
  from ...errors.not_found_error import NotFoundError
+ from ...types.document_read import DocumentRead
+ from ...types.document_status import DocumentStatus
  from ...types.paginated_slim_document_list import PaginatedSlimDocumentList
  from ...types.upload_document_response import UploadDocumentResponse

@@ -68,6 +70,52 @@ class DocumentsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ def partial_update(
+ self,
+ id: str,
+ *,
+ label: typing.Optional[str] = OMIT,
+ status: typing.Optional[DocumentStatus] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ ) -> DocumentRead:
+ """
+
+ <strong style="background-color:#ffc107; color:white; padding:4px; border-radius:4px">Unstable</strong>
+
+ Update a Document, keying off of its Vellum-generated ID. Particularly useful for updating its metadata.
+
+ Parameters:
+ - id: str. A UUID string identifying this document.
+
+ - label: typing.Optional[str]. A human-readable label for the document. Defaults to the originally uploaded file's file name. <span style="white-space: nowrap">`non-empty`</span> <span style="white-space: nowrap">`<= 1000 characters`</span>
+
+ - status: typing.Optional[DocumentStatus]. The current status of the document
+
+ * `ACTIVE` - Active
+ - metadata: typing.Optional[typing.Dict[str, typing.Any]]. A JSON object containing any metadata associated with the document that you'd like to filter upon later.
+ """
+ _request: typing.Dict[str, typing.Any] = {}
+ if label is not OMIT:
+ _request["label"] = label
+ if status is not OMIT:
+ _request["status"] = status
+ if metadata is not OMIT:
+ _request["metadata"] = metadata
+ _response = self._client_wrapper.httpx_client.request(
+ "PATCH",
+ urllib.parse.urljoin(f"{self._environment.default}/", f"v1/documents/{id}"),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=None,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(DocumentRead, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  def upload(
  self,
  *,
@@ -176,6 +224,52 @@ class AsyncDocumentsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ async def partial_update(
+ self,
+ id: str,
+ *,
+ label: typing.Optional[str] = OMIT,
+ status: typing.Optional[DocumentStatus] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ ) -> DocumentRead:
+ """
+
+ <strong style="background-color:#ffc107; color:white; padding:4px; border-radius:4px">Unstable</strong>
+
+ Update a Document, keying off of its Vellum-generated ID. Particularly useful for updating its metadata.
+
+ Parameters:
+ - id: str. A UUID string identifying this document.
+
+ - label: typing.Optional[str]. A human-readable label for the document. Defaults to the originally uploaded file's file name. <span style="white-space: nowrap">`non-empty`</span> <span style="white-space: nowrap">`<= 1000 characters`</span>
+
+ - status: typing.Optional[DocumentStatus]. The current status of the document
+
+ * `ACTIVE` - Active
+ - metadata: typing.Optional[typing.Dict[str, typing.Any]]. A JSON object containing any metadata associated with the document that you'd like to filter upon later.
+ """
+ _request: typing.Dict[str, typing.Any] = {}
+ if label is not OMIT:
+ _request["label"] = label
+ if status is not OMIT:
+ _request["status"] = status
+ if metadata is not OMIT:
+ _request["metadata"] = metadata
+ _response = await self._client_wrapper.httpx_client.request(
+ "PATCH",
+ urllib.parse.urljoin(f"{self._environment.default}/", f"v1/documents/{id}"),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=None,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(DocumentRead, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  async def upload(
  self,
  *,
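The new partial_update() methods above issue a PATCH to v1/documents/{id} with only the fields you pass. A hedged usage sketch for the synchronous client; the document ID, label, and metadata are placeholders, and constructing the client with just an api_key is an assumption, not something shown in the diff:

from vellum.client import Vellum

client = Vellum(api_key="YOUR_API_KEY")  # assumption: api_key-only construction

updated = client.documents.partial_update(
    id="11111111-2222-3333-4444-555555555555",    # Vellum-generated document UUID (placeholder)
    label="Q3 sales report",                      # optional: rename the document
    metadata={"team": "sales", "quarter": "Q3"},  # optional: metadata to filter on later
)
print(updated.label, updated.status)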
{vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/__init__.py
@@ -10,11 +10,13 @@ from .content_type import ContentType
  from .deployment_node_result import DeploymentNodeResult
  from .deployment_node_result_data import DeploymentNodeResultData
  from .deployment_read import DeploymentRead
- from .deployment_read_status_enum import DeploymentReadStatusEnum
+ from .deployment_status import DeploymentStatus
  from .document import Document
  from .document_document_to_document_index import DocumentDocumentToDocumentIndex
  from .document_index_read import DocumentIndexRead
  from .document_index_status import DocumentIndexStatus
+ from .document_read import DocumentRead
+ from .document_status import DocumentStatus
  from .enriched_normalized_completion import EnrichedNormalizedCompletion
  from .environment_enum import EnvironmentEnum
  from .evaluation_params import EvaluationParams
@@ -89,7 +91,6 @@ from .search_result import SearchResult
  from .search_result_merging_request import SearchResultMergingRequest
  from .search_weights_request import SearchWeightsRequest
  from .slim_document import SlimDocument
- from .slim_document_status_enum import SlimDocumentStatusEnum
  from .submit_completion_actual_request import SubmitCompletionActualRequest
  from .submit_completion_actuals_error_response import SubmitCompletionActualsErrorResponse
  from .terminal_node_chat_history_result import TerminalNodeChatHistoryResult
@@ -154,11 +155,13 @@ __all__ = [
  "DeploymentNodeResult",
  "DeploymentNodeResultData",
  "DeploymentRead",
- "DeploymentReadStatusEnum",
+ "DeploymentStatus",
  "Document",
  "DocumentDocumentToDocumentIndex",
  "DocumentIndexRead",
  "DocumentIndexStatus",
+ "DocumentRead",
+ "DocumentStatus",
  "EnrichedNormalizedCompletion",
  "EnvironmentEnum",
  "EvaluationParams",
@@ -233,7 +236,6 @@ __all__ = [
  "SearchResultMergingRequest",
  "SearchWeightsRequest",
  "SlimDocument",
- "SlimDocumentStatusEnum",
  "SubmitCompletionActualRequest",
  "SubmitCompletionActualsErrorResponse",
  "TerminalNodeChatHistoryResult",
{vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/deployment_read.py
@@ -6,7 +6,7 @@ import typing
  import pydantic

  from ..core.datetime_utils import serialize_datetime
- from .deployment_read_status_enum import DeploymentReadStatusEnum
+ from .deployment_status import DeploymentStatus
  from .environment_enum import EnvironmentEnum
  from .input_variable import InputVariable
  from .model_type_enum import ModelTypeEnum
@@ -21,7 +21,7 @@ class DeploymentRead(pydantic.BaseModel):
  name: str = pydantic.Field(
  description='A name that uniquely identifies this deployment within its workspace <span style="white-space: nowrap">`<= 150 characters`</span> '
  )
- status: typing.Optional[DeploymentReadStatusEnum] = pydantic.Field(
+ status: typing.Optional[DeploymentStatus] = pydantic.Field(
  description=(
  "The current status of the deployment\n"
  "\n"
vellum_ai-0.0.29/src/vellum/types/deployment_read_status_enum.py → vellum_ai-0.0.31/src/vellum/types/deployment_status.py
@@ -6,7 +6,7 @@ import typing
  T_Result = typing.TypeVar("T_Result")


- class DeploymentReadStatusEnum(str, enum.Enum):
+ class DeploymentStatus(str, enum.Enum):
  """
  * `ACTIVE` - Active
  * `INACTIVE` - Inactive
@@ -23,9 +23,9 @@ class DeploymentReadStatusEnum(str, enum.Enum):
  inactive: typing.Callable[[], T_Result],
  archived: typing.Callable[[], T_Result],
  ) -> T_Result:
- if self is DeploymentReadStatusEnum.ACTIVE:
+ if self is DeploymentStatus.ACTIVE:
  return active()
- if self is DeploymentReadStatusEnum.INACTIVE:
+ if self is DeploymentStatus.INACTIVE:
  return inactive()
- if self is DeploymentReadStatusEnum.ARCHIVED:
+ if self is DeploymentStatus.ARCHIVED:
  return archived()
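DeploymentReadStatusEnum is renamed to DeploymentStatus with no change in members, so only imports and references need updating. A small sketch of the renamed enum in use; the visit() method name sits just above this hunk and is assumed from the SDK's enum convention:

from vellum import DeploymentStatus  # was DeploymentReadStatusEnum in <= 0.0.29

status = DeploymentStatus.ACTIVE

# visit() dispatches one callable per member, as in the hunk above.
label = status.visit(
    active=lambda: "deployment is live",
    inactive=lambda: "deployment is paused",
    archived=lambda: "deployment is archived",
)
print(label)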
vellum_ai-0.0.31/src/vellum/types/document_read.py (new file)
@@ -0,0 +1,53 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ import pydantic
+
+ from ..core.datetime_utils import serialize_datetime
+ from .document_document_to_document_index import DocumentDocumentToDocumentIndex
+ from .document_status import DocumentStatus
+ from .processing_state_enum import ProcessingStateEnum
+
+
+ class DocumentRead(pydantic.BaseModel):
+ id: str
+ external_id: typing.Optional[str] = pydantic.Field(
+ description="The unique id of this document as it exists in the user's system."
+ )
+ last_uploaded_at: str
+ label: str = pydantic.Field(
+ description='A human-readable label for the document. Defaults to the originally uploaded file\'s file name. <span style="white-space: nowrap">`<= 1000 characters`</span> '
+ )
+ processing_state: typing.Optional[ProcessingStateEnum] = pydantic.Field(
+ description=(
+ "The current processing state of the document\n"
+ "\n"
+ "* `QUEUED` - Queued\n"
+ "* `PROCESSING` - Processing\n"
+ "* `PROCESSED` - Processed\n"
+ "* `FAILED` - Failed\n"
+ )
+ )
+ status: typing.Optional[DocumentStatus] = pydantic.Field(
+ description=("The current status of the document\n" "\n" "* `ACTIVE` - Active\n")
+ )
+ original_file_url: typing.Optional[str]
+ processed_file_url: typing.Optional[str]
+ document_to_document_indexes: typing.List[DocumentDocumentToDocumentIndex]
+ metadata: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
+ description="A previously supplied JSON object containing metadata that can filtered on when searching."
+ )
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ json_encoders = {dt.datetime: serialize_datetime}
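DocumentRead is the new response model returned by documents.partial_update(). A hedged round-trip sketch against the fields declared above; the payload values are invented for illustration and are not taken from the diff:

import pydantic

from vellum import DocumentRead

payload = {
    "id": "11111111-2222-3333-4444-555555555555",
    "external_id": "contract-42",
    "last_uploaded_at": "2023-06-01T12:00:00Z",
    "label": "contract-42.pdf",
    "processing_state": "PROCESSED",
    "status": "ACTIVE",
    "original_file_url": None,
    "processed_file_url": None,
    "document_to_document_indexes": [],
    "metadata": {"team": "legal"},
}

doc = pydantic.parse_obj_as(DocumentRead, payload)
print(doc.label, doc.processing_state, doc.metadata)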
vellum_ai-0.0.29/src/vellum/types/slim_document_status_enum.py → vellum_ai-0.0.31/src/vellum/types/document_status.py
@@ -2,4 +2,4 @@

  import typing_extensions

- SlimDocumentStatusEnum = typing_extensions.Literal["ACTIVE"]
+ DocumentStatus = typing_extensions.Literal["ACTIVE"]
{vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/slim_document.py
@@ -7,9 +7,9 @@ import pydantic

  from ..core.datetime_utils import serialize_datetime
  from .document_document_to_document_index import DocumentDocumentToDocumentIndex
+ from .document_status import DocumentStatus
  from .processing_failure_reason_enum import ProcessingFailureReasonEnum
  from .processing_state_enum import ProcessingStateEnum
- from .slim_document_status_enum import SlimDocumentStatusEnum


  class SlimDocument(pydantic.BaseModel):
@@ -41,7 +41,7 @@ class SlimDocument(pydantic.BaseModel):
  "* `INVALID_FILE` - Invalid File\n"
  )
  )
- status: typing.Optional[SlimDocumentStatusEnum] = pydantic.Field(
+ status: typing.Optional[DocumentStatus] = pydantic.Field(
  description=("The document's current status.\n" "\n" "* `ACTIVE` - Active\n")
  )
  keywords: typing.Optional[typing.List[str]] = pydantic.Field(
{vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/terminal_node_chat_history_result.py
@@ -11,7 +11,7 @@ from .chat_message import ChatMessage

  class TerminalNodeChatHistoryResult(pydantic.BaseModel):
  name: str = pydantic.Field(description="The unique name given to the terminal node that produced this output.")
- value: typing.List[ChatMessage]
+ value: typing.Optional[typing.List[ChatMessage]]

  def json(self, **kwargs: typing.Any) -> str:
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
{vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/terminal_node_json_result.py
@@ -10,7 +10,7 @@ from ..core.datetime_utils import serialize_datetime

  class TerminalNodeJsonResult(pydantic.BaseModel):
  name: str = pydantic.Field(description="The unique name given to the terminal node that produced this output.")
- value: typing.Dict[str, typing.Any]
+ value: typing.Optional[typing.Dict[str, typing.Any]]

  def json(self, **kwargs: typing.Any) -> str:
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
{vellum_ai-0.0.29 → vellum_ai-0.0.31}/src/vellum/types/terminal_node_string_result.py
@@ -10,7 +10,7 @@ from ..core.datetime_utils import serialize_datetime

  class TerminalNodeStringResult(pydantic.BaseModel):
  name: str = pydantic.Field(description="The unique name given to the terminal node that produced this output.")
- value: str
+ value: typing.Optional[str]

  def json(self, **kwargs: typing.Any) -> str:
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
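The three terminal-node result models above now declare their value fields as Optional, so a terminal node that produced no output deserializes cleanly instead of failing validation. Downstream code should guard for None; a minimal sketch (the construction below is illustrative only, and the top-level export of the type is assumed):

from vellum import TerminalNodeStringResult

result = TerminalNodeStringResult(name="final-output", value=None)  # allowed as of 0.0.31

text = result.value if result.value is not None else "<terminal node produced no output>"
print(text)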