vellum-ai 0.1.8__tar.gz → 0.1.10__tar.gz

Files changed (234)
  1. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/PKG-INFO +1 -1
  2. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/pyproject.toml +1 -1
  3. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/__init__.py +70 -0
  4. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/client.py +331 -5
  5. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/core/client_wrapper.py +1 -1
  6. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/errors/forbidden_error.py +3 -2
  7. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/resources/registered_prompts/client.py +2 -0
  8. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/__init__.py +71 -0
  9. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/block_type_enum.py +4 -4
  10. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/chat_message_role.py +4 -4
  11. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/deployment_read.py +6 -6
  12. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/deployment_status.py +3 -3
  13. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/document_document_to_document_index.py +5 -5
  14. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/document_index_read.py +4 -4
  15. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/document_index_status.py +2 -2
  16. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/document_read.py +5 -5
  17. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/enriched_normalized_completion.py +3 -3
  18. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/environment_enum.py +3 -3
  19. vellum_ai-0.1.10/src/vellum/types/error_variable_value.py +29 -0
  20. vellum_ai-0.1.10/src/vellum/types/execute_prompt_api_error_response.py +28 -0
  21. vellum_ai-0.1.10/src/vellum/types/execute_prompt_event.py +56 -0
  22. vellum_ai-0.1.10/src/vellum/types/execute_prompt_response.py +31 -0
  23. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/finish_reason_enum.py +3 -3
  24. vellum_ai-0.1.10/src/vellum/types/fulfilled_enum.py +5 -0
  25. vellum_ai-0.1.10/src/vellum/types/fulfilled_execute_prompt_event.py +36 -0
  26. vellum_ai-0.1.10/src/vellum/types/fulfilled_execute_prompt_response.py +39 -0
  27. vellum_ai-0.1.10/src/vellum/types/fulfilled_prompt_execution_meta.py +34 -0
  28. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/generate_options_request.py +1 -1
  29. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/indexing_state_enum.py +5 -5
  30. vellum_ai-0.1.10/src/vellum/types/initiated_enum.py +5 -0
  31. vellum_ai-0.1.10/src/vellum/types/initiated_execute_prompt_event.py +34 -0
  32. vellum_ai-0.1.10/src/vellum/types/initiated_prompt_execution_meta.py +35 -0
  33. vellum_ai-0.1.10/src/vellum/types/json_variable_value.py +28 -0
  34. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/logical_operator.py +18 -18
  35. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/logprobs_enum.py +2 -2
  36. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/metadata_filter_rule_combinator.py +2 -2
  37. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/model_version_read.py +13 -12
  38. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/model_version_read_status_enum.py +4 -4
  39. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/processing_failure_reason_enum.py +2 -2
  40. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/processing_state_enum.py +4 -4
  41. vellum_ai-0.1.10/src/vellum/types/prompt_deployment_expand_meta_request_request.py +42 -0
  42. vellum_ai-0.1.10/src/vellum/types/prompt_execution_meta.py +37 -0
  43. vellum_ai-0.1.10/src/vellum/types/prompt_output.py +41 -0
  44. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/provider_enum.py +17 -12
  45. vellum_ai-0.1.10/src/vellum/types/raw_prompt_execution_overrides_request.py +32 -0
  46. vellum_ai-0.1.10/src/vellum/types/rejected_enum.py +5 -0
  47. vellum_ai-0.1.10/src/vellum/types/rejected_execute_prompt_event.py +36 -0
  48. vellum_ai-0.1.10/src/vellum/types/rejected_execute_prompt_response.py +39 -0
  49. vellum_ai-0.1.10/src/vellum/types/rejected_prompt_execution_meta.py +34 -0
  50. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/scenario_input_type_enum.py +2 -2
  51. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/slim_document.py +7 -7
  52. vellum_ai-0.1.10/src/vellum/types/streaming_enum.py +5 -0
  53. vellum_ai-0.1.10/src/vellum/types/streaming_execute_prompt_event.py +40 -0
  54. vellum_ai-0.1.10/src/vellum/types/streaming_prompt_execution_meta.py +32 -0
  55. vellum_ai-0.1.10/src/vellum/types/string_variable_value.py +28 -0
  56. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/vellum_error_code_enum.py +3 -3
  57. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/vellum_variable_type.py +11 -6
  58. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_execution_event_error_code.py +6 -6
  59. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_execution_event_type.py +2 -2
  60. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_node_result_event_state.py +4 -4
  61. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_request_input_request.py +14 -1
  62. vellum_ai-0.1.10/src/vellum/types/workflow_request_number_input_request.py +29 -0
  63. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/README.md +0 -0
  64. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/core/__init__.py +0 -0
  65. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/core/api_error.py +0 -0
  66. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/core/datetime_utils.py +0 -0
  67. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/core/jsonable_encoder.py +0 -0
  68. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/core/remove_none_from_dict.py +0 -0
  69. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/environment.py +0 -0
  70. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/errors/__init__.py +0 -0
  71. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/errors/bad_request_error.py +0 -0
  72. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/errors/conflict_error.py +0 -0
  73. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/errors/internal_server_error.py +0 -0
  74. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/errors/not_found_error.py +0 -0
  75. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/py.typed +0 -0
  76. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/resources/__init__.py +0 -0
  77. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/resources/deployments/__init__.py +0 -0
  78. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/resources/deployments/client.py +0 -0
  79. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/resources/document_indexes/__init__.py +0 -0
  80. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/resources/document_indexes/client.py +0 -0
  81. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/resources/documents/__init__.py +0 -0
  82. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/resources/documents/client.py +0 -0
  83. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/resources/model_versions/__init__.py +0 -0
  84. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/resources/model_versions/client.py +0 -0
  85. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/resources/registered_prompts/__init__.py +0 -0
  86. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/resources/sandboxes/__init__.py +0 -0
  87. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/resources/sandboxes/client.py +0 -0
  88. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/resources/test_suites/__init__.py +0 -0
  89. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/resources/test_suites/client.py +0 -0
  90. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/api_node_result.py +0 -0
  91. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/api_node_result_data.py +0 -0
  92. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/chat_history_input_request.py +0 -0
  93. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/chat_message.py +0 -0
  94. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/chat_message_request.py +0 -0
  95. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/code_execution_node_chat_history_result.py +0 -0
  96. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/code_execution_node_error_result.py +0 -0
  97. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/code_execution_node_json_result.py +0 -0
  98. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/code_execution_node_number_result.py +0 -0
  99. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/code_execution_node_result.py +0 -0
  100. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/code_execution_node_result_data.py +0 -0
  101. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/code_execution_node_result_output.py +0 -0
  102. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/code_execution_node_search_results_result.py +0 -0
  103. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/code_execution_node_string_result.py +0 -0
  104. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/conditional_node_result.py +0 -0
  105. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/conditional_node_result_data.py +0 -0
  106. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/deployment_provider_payload_response.py +0 -0
  107. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/document_status.py +0 -0
  108. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/evaluation_params.py +0 -0
  109. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/evaluation_params_request.py +0 -0
  110. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/execute_workflow_stream_error_response.py +0 -0
  111. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/generate_error_response.py +0 -0
  112. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/generate_request.py +0 -0
  113. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/generate_response.py +0 -0
  114. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/generate_result.py +0 -0
  115. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/generate_result_data.py +0 -0
  116. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/generate_result_error.py +0 -0
  117. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/generate_stream_response.py +0 -0
  118. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/generate_stream_result.py +0 -0
  119. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/generate_stream_result_data.py +0 -0
  120. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/json_input_request.py +0 -0
  121. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/metadata_filter_config_request.py +0 -0
  122. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/metadata_filter_rule_request.py +0 -0
  123. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/model_version_build_config.py +0 -0
  124. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/model_version_exec_config.py +0 -0
  125. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/model_version_exec_config_parameters.py +0 -0
  126. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/model_version_sandbox_snapshot.py +0 -0
  127. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/named_test_case_chat_history_variable_value_request.py +0 -0
  128. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/named_test_case_error_variable_value_request.py +0 -0
  129. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/named_test_case_json_variable_value_request.py +0 -0
  130. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/named_test_case_number_variable_value_request.py +0 -0
  131. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/named_test_case_search_results_variable_value_request.py +0 -0
  132. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/named_test_case_string_variable_value_request.py +0 -0
  133. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/named_test_case_variable_value_request.py +0 -0
  134. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/node_input_compiled_chat_history_value.py +0 -0
  135. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/node_input_compiled_error_value.py +0 -0
  136. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/node_input_compiled_json_value.py +0 -0
  137. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/node_input_compiled_number_value.py +0 -0
  138. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/node_input_compiled_search_results_value.py +0 -0
  139. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/node_input_compiled_string_value.py +0 -0
  140. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/node_input_variable_compiled_value.py +0 -0
  141. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/normalized_log_probs.py +0 -0
  142. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/normalized_token_log_probs.py +0 -0
  143. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/paginated_slim_document_list.py +0 -0
  144. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/prompt_deployment_input_request.py +0 -0
  145. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/prompt_node_result.py +0 -0
  146. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/prompt_node_result_data.py +0 -0
  147. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/prompt_template_block.py +0 -0
  148. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/prompt_template_block_data.py +0 -0
  149. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/prompt_template_block_data_request.py +0 -0
  150. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/prompt_template_block_properties.py +0 -0
  151. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/prompt_template_block_properties_request.py +0 -0
  152. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/prompt_template_block_request.py +0 -0
  153. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/register_prompt_error_response.py +0 -0
  154. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/register_prompt_model_parameters_request.py +0 -0
  155. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/register_prompt_prompt.py +0 -0
  156. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/register_prompt_prompt_info_request.py +0 -0
  157. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/register_prompt_response.py +0 -0
  158. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/registered_prompt_deployment.py +0 -0
  159. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/registered_prompt_input_variable_request.py +0 -0
  160. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/registered_prompt_model_version.py +0 -0
  161. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/registered_prompt_sandbox.py +0 -0
  162. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/registered_prompt_sandbox_snapshot.py +0 -0
  163. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/sandbox_metric_input_params.py +0 -0
  164. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/sandbox_metric_input_params_request.py +0 -0
  165. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/sandbox_scenario.py +0 -0
  166. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/scenario_input.py +0 -0
  167. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/scenario_input_request.py +0 -0
  168. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/search_error_response.py +0 -0
  169. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/search_filters_request.py +0 -0
  170. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/search_node_result.py +0 -0
  171. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/search_node_result_data.py +0 -0
  172. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/search_request_options_request.py +0 -0
  173. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/search_response.py +0 -0
  174. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/search_result.py +0 -0
  175. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/search_result_document.py +0 -0
  176. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/search_result_document_request.py +0 -0
  177. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/search_result_merging_request.py +0 -0
  178. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/search_result_request.py +0 -0
  179. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/search_weights_request.py +0 -0
  180. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/string_input_request.py +0 -0
  181. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/submit_completion_actual_request.py +0 -0
  182. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/submit_completion_actuals_error_response.py +0 -0
  183. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/submit_workflow_execution_actual_request.py +0 -0
  184. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/templating_node_chat_history_result.py +0 -0
  185. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/templating_node_error_result.py +0 -0
  186. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/templating_node_json_result.py +0 -0
  187. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/templating_node_number_result.py +0 -0
  188. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/templating_node_result.py +0 -0
  189. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/templating_node_result_data.py +0 -0
  190. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/templating_node_result_output.py +0 -0
  191. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/templating_node_search_results_result.py +0 -0
  192. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/templating_node_string_result.py +0 -0
  193. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/terminal_node_chat_history_result.py +0 -0
  194. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/terminal_node_error_result.py +0 -0
  195. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/terminal_node_json_result.py +0 -0
  196. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/terminal_node_number_result.py +0 -0
  197. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/terminal_node_result.py +0 -0
  198. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/terminal_node_result_data.py +0 -0
  199. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/terminal_node_result_output.py +0 -0
  200. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/terminal_node_search_results_result.py +0 -0
  201. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/terminal_node_string_result.py +0 -0
  202. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/test_case_chat_history_variable_value.py +0 -0
  203. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/test_case_error_variable_value.py +0 -0
  204. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/test_case_json_variable_value.py +0 -0
  205. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/test_case_number_variable_value.py +0 -0
  206. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/test_case_search_results_variable_value.py +0 -0
  207. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/test_case_string_variable_value.py +0 -0
  208. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/test_case_variable_value.py +0 -0
  209. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/test_suite_test_case.py +0 -0
  210. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/upload_document_error_response.py +0 -0
  211. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/upload_document_response.py +0 -0
  212. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/vellum_error.py +0 -0
  213. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/vellum_error_request.py +0 -0
  214. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/vellum_variable.py +0 -0
  215. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_event_error.py +0 -0
  216. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_execution_actual_chat_history_request.py +0 -0
  217. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_execution_actual_json_request.py +0 -0
  218. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_execution_actual_string_request.py +0 -0
  219. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_execution_node_result_event.py +0 -0
  220. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_execution_workflow_result_event.py +0 -0
  221. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_node_result_data.py +0 -0
  222. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_node_result_event.py +0 -0
  223. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_request_chat_history_input_request.py +0 -0
  224. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_request_json_input_request.py +0 -0
  225. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_request_string_input_request.py +0 -0
  226. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_result_event.py +0 -0
  227. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_result_event_output_data.py +0 -0
  228. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_result_event_output_data_chat_history.py +0 -0
  229. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_result_event_output_data_error.py +0 -0
  230. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_result_event_output_data_json.py +0 -0
  231. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_result_event_output_data_number.py +0 -0
  232. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_result_event_output_data_search_results.py +0 -0
  233. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_result_event_output_data_string.py +0 -0
  234. {vellum_ai-0.1.8 → vellum_ai-0.1.10}/src/vellum/types/workflow_stream_event.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vellum-ai
-Version: 0.1.8
+Version: 0.1.10
 Summary:
 Requires-Python: >=3.7,<4.0
 Classifier: Programming Language :: Python :: 3
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "vellum-ai"
-version = "0.1.8"
+version = "v0.1.10"
 description = ""
 readme = "README.md"
 authors = []
@@ -35,10 +35,24 @@ from .types import (
     DocumentStatus,
     EnrichedNormalizedCompletion,
     EnvironmentEnum,
+    ErrorVariableValue,
     EvaluationParams,
     EvaluationParamsRequest,
+    ExecutePromptApiErrorResponse,
+    ExecutePromptEvent,
+    ExecutePromptEvent_Fulfilled,
+    ExecutePromptEvent_Initiated,
+    ExecutePromptEvent_Rejected,
+    ExecutePromptEvent_Streaming,
+    ExecutePromptResponse,
+    ExecutePromptResponse_Fulfilled,
+    ExecutePromptResponse_Rejected,
     ExecuteWorkflowStreamErrorResponse,
     FinishReasonEnum,
+    FulfilledEnum,
+    FulfilledExecutePromptEvent,
+    FulfilledExecutePromptResponse,
+    FulfilledPromptExecutionMeta,
     GenerateErrorResponse,
     GenerateOptionsRequest,
     GenerateRequest,
@@ -50,7 +64,11 @@ from .types import (
     GenerateStreamResult,
     GenerateStreamResultData,
     IndexingStateEnum,
+    InitiatedEnum,
+    InitiatedExecutePromptEvent,
+    InitiatedPromptExecutionMeta,
     JsonInputRequest,
+    JsonVariableValue,
     LogicalOperator,
     LogprobsEnum,
     MetadataFilterConfigRequest,
@@ -93,12 +111,18 @@ from .types import (
     PaginatedSlimDocumentList,
     ProcessingFailureReasonEnum,
     ProcessingStateEnum,
+    PromptDeploymentExpandMetaRequestRequest,
     PromptDeploymentInputRequest,
     PromptDeploymentInputRequest_ChatHistory,
     PromptDeploymentInputRequest_Json,
     PromptDeploymentInputRequest_String,
+    PromptExecutionMeta,
     PromptNodeResult,
     PromptNodeResultData,
+    PromptOutput,
+    PromptOutput_Error,
+    PromptOutput_Json,
+    PromptOutput_String,
     PromptTemplateBlock,
     PromptTemplateBlockData,
     PromptTemplateBlockDataRequest,
@@ -106,6 +130,7 @@ from .types import (
     PromptTemplateBlockPropertiesRequest,
     PromptTemplateBlockRequest,
     ProviderEnum,
+    RawPromptExecutionOverridesRequest,
     RegisterPromptErrorResponse,
     RegisterPromptModelParametersRequest,
     RegisterPromptPrompt,
@@ -116,6 +141,10 @@ from .types import (
     RegisteredPromptModelVersion,
     RegisteredPromptSandbox,
     RegisteredPromptSandboxSnapshot,
+    RejectedEnum,
+    RejectedExecutePromptEvent,
+    RejectedExecutePromptResponse,
+    RejectedPromptExecutionMeta,
     SandboxMetricInputParams,
     SandboxMetricInputParamsRequest,
     SandboxScenario,
@@ -135,7 +164,11 @@ from .types import (
     SearchResultRequest,
     SearchWeightsRequest,
     SlimDocument,
+    StreamingEnum,
+    StreamingExecutePromptEvent,
+    StreamingPromptExecutionMeta,
     StringInputRequest,
+    StringVariableValue,
     SubmitCompletionActualRequest,
     SubmitCompletionActualsErrorResponse,
     SubmitWorkflowExecutionActualRequest,
@@ -215,8 +248,10 @@ from .types import (
     WorkflowRequestInputRequest,
     WorkflowRequestInputRequest_ChatHistory,
     WorkflowRequestInputRequest_Json,
+    WorkflowRequestInputRequest_Number,
     WorkflowRequestInputRequest_String,
     WorkflowRequestJsonInputRequest,
+    WorkflowRequestNumberInputRequest,
     WorkflowRequestStringInputRequest,
     WorkflowResultEvent,
     WorkflowResultEventOutputData,
@@ -285,11 +320,25 @@ __all__ = [
     "DocumentStatus",
     "EnrichedNormalizedCompletion",
     "EnvironmentEnum",
+    "ErrorVariableValue",
     "EvaluationParams",
     "EvaluationParamsRequest",
+    "ExecutePromptApiErrorResponse",
+    "ExecutePromptEvent",
+    "ExecutePromptEvent_Fulfilled",
+    "ExecutePromptEvent_Initiated",
+    "ExecutePromptEvent_Rejected",
+    "ExecutePromptEvent_Streaming",
+    "ExecutePromptResponse",
+    "ExecutePromptResponse_Fulfilled",
+    "ExecutePromptResponse_Rejected",
     "ExecuteWorkflowStreamErrorResponse",
     "FinishReasonEnum",
     "ForbiddenError",
+    "FulfilledEnum",
+    "FulfilledExecutePromptEvent",
+    "FulfilledExecutePromptResponse",
+    "FulfilledPromptExecutionMeta",
     "GenerateErrorResponse",
     "GenerateOptionsRequest",
     "GenerateRequest",
@@ -301,8 +350,12 @@ __all__ = [
     "GenerateStreamResult",
     "GenerateStreamResultData",
     "IndexingStateEnum",
+    "InitiatedEnum",
+    "InitiatedExecutePromptEvent",
+    "InitiatedPromptExecutionMeta",
     "InternalServerError",
     "JsonInputRequest",
+    "JsonVariableValue",
     "LogicalOperator",
     "LogprobsEnum",
     "MetadataFilterConfigRequest",
@@ -346,12 +399,18 @@ __all__ = [
     "PaginatedSlimDocumentList",
     "ProcessingFailureReasonEnum",
     "ProcessingStateEnum",
+    "PromptDeploymentExpandMetaRequestRequest",
     "PromptDeploymentInputRequest",
     "PromptDeploymentInputRequest_ChatHistory",
     "PromptDeploymentInputRequest_Json",
     "PromptDeploymentInputRequest_String",
+    "PromptExecutionMeta",
     "PromptNodeResult",
     "PromptNodeResultData",
+    "PromptOutput",
+    "PromptOutput_Error",
+    "PromptOutput_Json",
+    "PromptOutput_String",
     "PromptTemplateBlock",
     "PromptTemplateBlockData",
     "PromptTemplateBlockDataRequest",
@@ -359,6 +418,7 @@ __all__ = [
     "PromptTemplateBlockPropertiesRequest",
     "PromptTemplateBlockRequest",
     "ProviderEnum",
+    "RawPromptExecutionOverridesRequest",
     "RegisterPromptErrorResponse",
     "RegisterPromptModelParametersRequest",
     "RegisterPromptPrompt",
@@ -369,6 +429,10 @@ __all__ = [
     "RegisteredPromptModelVersion",
     "RegisteredPromptSandbox",
     "RegisteredPromptSandboxSnapshot",
+    "RejectedEnum",
+    "RejectedExecutePromptEvent",
+    "RejectedExecutePromptResponse",
+    "RejectedPromptExecutionMeta",
     "SandboxMetricInputParams",
     "SandboxMetricInputParamsRequest",
     "SandboxScenario",
@@ -388,7 +452,11 @@ __all__ = [
     "SearchResultRequest",
     "SearchWeightsRequest",
     "SlimDocument",
+    "StreamingEnum",
+    "StreamingExecutePromptEvent",
+    "StreamingPromptExecutionMeta",
     "StringInputRequest",
+    "StringVariableValue",
     "SubmitCompletionActualRequest",
     "SubmitCompletionActualsErrorResponse",
     "SubmitWorkflowExecutionActualRequest",
@@ -469,8 +537,10 @@ __all__ = [
     "WorkflowRequestInputRequest",
     "WorkflowRequestInputRequest_ChatHistory",
     "WorkflowRequestInputRequest_Json",
+    "WorkflowRequestInputRequest_Number",
     "WorkflowRequestInputRequest_String",
     "WorkflowRequestJsonInputRequest",
+    "WorkflowRequestNumberInputRequest",
     "WorkflowRequestStringInputRequest",
     "WorkflowResultEvent",
     "WorkflowResultEventOutputData",
@@ -22,11 +22,15 @@ from .resources.model_versions.client import AsyncModelVersionsClient, ModelVersionsClient
 from .resources.registered_prompts.client import AsyncRegisteredPromptsClient, RegisteredPromptsClient
 from .resources.sandboxes.client import AsyncSandboxesClient, SandboxesClient
 from .resources.test_suites.client import AsyncTestSuitesClient, TestSuitesClient
-from .types.generate_error_response import GenerateErrorResponse
+from .types.execute_prompt_event import ExecutePromptEvent
+from .types.execute_prompt_response import ExecutePromptResponse
 from .types.generate_options_request import GenerateOptionsRequest
 from .types.generate_request import GenerateRequest
 from .types.generate_response import GenerateResponse
 from .types.generate_stream_response import GenerateStreamResponse
+from .types.prompt_deployment_expand_meta_request_request import PromptDeploymentExpandMetaRequestRequest
+from .types.prompt_deployment_input_request import PromptDeploymentInputRequest
+from .types.raw_prompt_execution_overrides_request import RawPromptExecutionOverridesRequest
 from .types.search_request_options_request import SearchRequestOptionsRequest
 from .types.search_response import SearchResponse
 from .types.submit_completion_actual_request import SubmitCompletionActualRequest
@@ -66,6 +70,167 @@ class Vellum:
         self.sandboxes = SandboxesClient(client_wrapper=self._client_wrapper)
         self.test_suites = TestSuitesClient(client_wrapper=self._client_wrapper)
 
+    def execute_prompt(
+        self,
+        *,
+        inputs: typing.List[PromptDeploymentInputRequest],
+        prompt_deployment_id: typing.Optional[str] = OMIT,
+        prompt_deployment_name: typing.Optional[str] = OMIT,
+        release_tag: typing.Optional[str] = OMIT,
+        external_id: typing.Optional[str] = OMIT,
+        expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest] = OMIT,
+        raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest] = OMIT,
+        expand_raw: typing.Optional[typing.List[str]] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+    ) -> ExecutePromptResponse:
+        """
+        Executes a deployed Prompt and returns the result.
+
+        Note: This endpoint temporarily does not support prompts with function calling, support is coming soon.
+        In the meantime, we recommend still using the `/generate` endpoint for prompts with function calling.
+
+        Parameters:
+            - inputs: typing.List[PromptDeploymentInputRequest].
+
+            - prompt_deployment_id: typing.Optional[str]. The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.
+
+            - prompt_deployment_name: typing.Optional[str]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+
+            - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Prompt Deployment
+
+            - external_id: typing.Optional[str].
+
+            - expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+
+            - raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest].
+
+            - expand_raw: typing.Optional[typing.List[str]]. Returns the raw API response data sent from the model host. Combined with `raw_overrides`, it can be used to access new features from models.
+
+            - metadata: typing.Optional[typing.Dict[str, typing.Any]].
+        """
+        _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
+        if prompt_deployment_id is not OMIT:
+            _request["prompt_deployment_id"] = prompt_deployment_id
+        if prompt_deployment_name is not OMIT:
+            _request["prompt_deployment_name"] = prompt_deployment_name
+        if release_tag is not OMIT:
+            _request["release_tag"] = release_tag
+        if external_id is not OMIT:
+            _request["external_id"] = external_id
+        if expand_meta is not OMIT:
+            _request["expand_meta"] = expand_meta
+        if raw_overrides is not OMIT:
+            _request["raw_overrides"] = raw_overrides
+        if expand_raw is not OMIT:
+            _request["expand_raw"] = expand_raw
+        if metadata is not OMIT:
+            _request["metadata"] = metadata
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_environment().predict}/", "v1/execute-prompt"),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=None,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ExecutePromptResponse, _response.json())  # type: ignore
+        if _response.status_code == 400:
+            raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+        if _response.status_code == 403:
+            raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+        if _response.status_code == 404:
+            raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+        if _response.status_code == 500:
+            raise InternalServerError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def execute_prompt_stream(
+        self,
+        *,
+        inputs: typing.List[PromptDeploymentInputRequest],
+        prompt_deployment_id: typing.Optional[str] = OMIT,
+        prompt_deployment_name: typing.Optional[str] = OMIT,
+        release_tag: typing.Optional[str] = OMIT,
+        external_id: typing.Optional[str] = OMIT,
+        expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest] = OMIT,
+        raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest] = OMIT,
+        expand_raw: typing.Optional[typing.List[str]] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+    ) -> typing.Iterator[ExecutePromptEvent]:
+        """
+        Executes a deployed Prompt and streams back the results.
+
+        Note: This endpoint temporarily does not support prompts with function calling, support is coming soon.
+        In the meantime, we recommend still using the `/generate-stream` endpoint for prompts with function calling
+
+        Parameters:
+            - inputs: typing.List[PromptDeploymentInputRequest].
+
+            - prompt_deployment_id: typing.Optional[str]. The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.
+
+            - prompt_deployment_name: typing.Optional[str]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+
+            - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Prompt Deployment
+
+            - external_id: typing.Optional[str].
+
+            - expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+
+            - raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest].
+
+            - expand_raw: typing.Optional[typing.List[str]]. Returns the raw API response data sent from the model host. Combined with `raw_overrides`, it can be used to access new features from models.
+
+            - metadata: typing.Optional[typing.Dict[str, typing.Any]].
+        """
+        _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
+        if prompt_deployment_id is not OMIT:
+            _request["prompt_deployment_id"] = prompt_deployment_id
+        if prompt_deployment_name is not OMIT:
+            _request["prompt_deployment_name"] = prompt_deployment_name
+        if release_tag is not OMIT:
+            _request["release_tag"] = release_tag
+        if external_id is not OMIT:
+            _request["external_id"] = external_id
+        if expand_meta is not OMIT:
+            _request["expand_meta"] = expand_meta
+        if raw_overrides is not OMIT:
+            _request["raw_overrides"] = raw_overrides
+        if expand_raw is not OMIT:
+            _request["expand_raw"] = expand_raw
+        if metadata is not OMIT:
+            _request["metadata"] = metadata
+        with self._client_wrapper.httpx_client.stream(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_environment().predict}/", "v1/execute-prompt-stream"),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=None,
+        ) as _response:
+            if 200 <= _response.status_code < 300:
+                for _text in _response.iter_lines():
+                    if len(_text) == 0:
+                        continue
+                    yield pydantic.parse_obj_as(ExecutePromptEvent, json.loads(_text))  # type: ignore
+                return
+            _response.read()
+            if _response.status_code == 400:
+                raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 403:
+                raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 404:
+                raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 500:
+                raise InternalServerError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            try:
+                _response_json = _response.json()
+            except JSONDecodeError:
+                raise ApiError(status_code=_response.status_code, body=_response.text)
+            raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def execute_workflow_stream(
         self,
         *,
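The hunk above is the heart of the release: `Vellum.execute_prompt` posts to `v1/execute-prompt` and parses the body into the `ExecutePromptResponse` union, discriminated into the `Fulfilled`/`Rejected` variants exported earlier. A hedged usage sketch; the deployment name is a placeholder, and the `name`/`value` fields on the input variant plus the `state`/`outputs`/`error` fields on the response are assumptions inferred from the type names in this diff:

```python
from vellum import PromptDeploymentInputRequest_String
from vellum.client import Vellum

client = Vellum(api_key="YOUR_VELLUM_API_KEY")  # placeholder key

response = client.execute_prompt(
    prompt_deployment_name="my-deployment",  # or prompt_deployment_id=...
    inputs=[
        # Assumed field shape for the string input variant.
        PromptDeploymentInputRequest_String(name="question", value="What changed in 0.1.10?"),
    ],
)

# pydantic.parse_obj_as resolves the union to one concrete variant;
# `state` is the assumed discriminant ("FULFILLED" or "REJECTED").
if response.state == "FULFILLED":
    print(response.outputs)  # assumed list of PromptOutput variants
else:
    print(response.error)    # assumed rejection details
```

Note that the method raises `BadRequestError`, `ForbiddenError`, `NotFoundError`, or `InternalServerError` for the corresponding status codes, so callers that already handle `/generate` errors need no new exception types.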
@@ -187,7 +352,7 @@ class Vellum:
         if _response.status_code == 400:
             raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 403:
-            raise ForbiddenError(pydantic.parse_obj_as(GenerateErrorResponse, _response.json()))  # type: ignore
+            raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 404:
             raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 500:
@@ -244,7 +409,7 @@ class Vellum:
         if _response.status_code == 400:
             raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 403:
-            raise ForbiddenError(pydantic.parse_obj_as(GenerateErrorResponse, _response.json()))  # type: ignore
+            raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 404:
             raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 500:
@@ -432,6 +597,167 @@ class AsyncVellum:
         self.sandboxes = AsyncSandboxesClient(client_wrapper=self._client_wrapper)
         self.test_suites = AsyncTestSuitesClient(client_wrapper=self._client_wrapper)
 
+    async def execute_prompt(
+        self,
+        *,
+        inputs: typing.List[PromptDeploymentInputRequest],
+        prompt_deployment_id: typing.Optional[str] = OMIT,
+        prompt_deployment_name: typing.Optional[str] = OMIT,
+        release_tag: typing.Optional[str] = OMIT,
+        external_id: typing.Optional[str] = OMIT,
+        expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest] = OMIT,
+        raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest] = OMIT,
+        expand_raw: typing.Optional[typing.List[str]] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+    ) -> ExecutePromptResponse:
+        """
+        Executes a deployed Prompt and returns the result.
+
+        Note: This endpoint temporarily does not support prompts with function calling, support is coming soon.
+        In the meantime, we recommend still using the `/generate` endpoint for prompts with function calling.
+
+        Parameters:
+            - inputs: typing.List[PromptDeploymentInputRequest].
+
+            - prompt_deployment_id: typing.Optional[str]. The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.
+
+            - prompt_deployment_name: typing.Optional[str]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+
+            - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Prompt Deployment
+
+            - external_id: typing.Optional[str].
+
+            - expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+
+            - raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest].
+
+            - expand_raw: typing.Optional[typing.List[str]]. Returns the raw API response data sent from the model host. Combined with `raw_overrides`, it can be used to access new features from models.
+
+            - metadata: typing.Optional[typing.Dict[str, typing.Any]].
+        """
+        _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
+        if prompt_deployment_id is not OMIT:
+            _request["prompt_deployment_id"] = prompt_deployment_id
+        if prompt_deployment_name is not OMIT:
+            _request["prompt_deployment_name"] = prompt_deployment_name
+        if release_tag is not OMIT:
+            _request["release_tag"] = release_tag
+        if external_id is not OMIT:
+            _request["external_id"] = external_id
+        if expand_meta is not OMIT:
+            _request["expand_meta"] = expand_meta
+        if raw_overrides is not OMIT:
+            _request["raw_overrides"] = raw_overrides
+        if expand_raw is not OMIT:
+            _request["expand_raw"] = expand_raw
+        if metadata is not OMIT:
+            _request["metadata"] = metadata
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_environment().predict}/", "v1/execute-prompt"),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=None,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ExecutePromptResponse, _response.json())  # type: ignore
+        if _response.status_code == 400:
+            raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+        if _response.status_code == 403:
+            raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+        if _response.status_code == 404:
+            raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+        if _response.status_code == 500:
+            raise InternalServerError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def execute_prompt_stream(
+        self,
+        *,
+        inputs: typing.List[PromptDeploymentInputRequest],
+        prompt_deployment_id: typing.Optional[str] = OMIT,
+        prompt_deployment_name: typing.Optional[str] = OMIT,
+        release_tag: typing.Optional[str] = OMIT,
+        external_id: typing.Optional[str] = OMIT,
+        expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest] = OMIT,
+        raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest] = OMIT,
+        expand_raw: typing.Optional[typing.List[str]] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+    ) -> typing.AsyncIterator[ExecutePromptEvent]:
+        """
+        Executes a deployed Prompt and streams back the results.
+
+        Note: This endpoint temporarily does not support prompts with function calling, support is coming soon.
+        In the meantime, we recommend still using the `/generate-stream` endpoint for prompts with function calling
+
+        Parameters:
+            - inputs: typing.List[PromptDeploymentInputRequest].
+
+            - prompt_deployment_id: typing.Optional[str]. The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.
+
+            - prompt_deployment_name: typing.Optional[str]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+
+            - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Prompt Deployment
+
+            - external_id: typing.Optional[str].
+
+            - expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+
+            - raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest].
+
+            - expand_raw: typing.Optional[typing.List[str]]. Returns the raw API response data sent from the model host. Combined with `raw_overrides`, it can be used to access new features from models.
+
+            - metadata: typing.Optional[typing.Dict[str, typing.Any]].
+        """
+        _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
+        if prompt_deployment_id is not OMIT:
+            _request["prompt_deployment_id"] = prompt_deployment_id
+        if prompt_deployment_name is not OMIT:
+            _request["prompt_deployment_name"] = prompt_deployment_name
+        if release_tag is not OMIT:
+            _request["release_tag"] = release_tag
+        if external_id is not OMIT:
+            _request["external_id"] = external_id
+        if expand_meta is not OMIT:
+            _request["expand_meta"] = expand_meta
+        if raw_overrides is not OMIT:
+            _request["raw_overrides"] = raw_overrides
+        if expand_raw is not OMIT:
+            _request["expand_raw"] = expand_raw
+        if metadata is not OMIT:
+            _request["metadata"] = metadata
+        async with self._client_wrapper.httpx_client.stream(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_environment().predict}/", "v1/execute-prompt-stream"),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=None,
+        ) as _response:
+            if 200 <= _response.status_code < 300:
+                async for _text in _response.aiter_lines():
+                    if len(_text) == 0:
+                        continue
+                    yield pydantic.parse_obj_as(ExecutePromptEvent, json.loads(_text))  # type: ignore
+                return
+            await _response.aread()
+            if _response.status_code == 400:
+                raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 403:
+                raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 404:
+                raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            if _response.status_code == 500:
+                raise InternalServerError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+            try:
+                _response_json = _response.json()
+            except JSONDecodeError:
+                raise ApiError(status_code=_response.status_code, body=_response.text)
+            raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def execute_workflow_stream(
         self,
         *,
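`AsyncVellum` mirrors both methods, with `execute_prompt_stream` defined as an async generator over `v1/execute-prompt-stream` that yields one `ExecutePromptEvent` per non-empty line of the response. A sketch of consuming it, assuming the event union is discriminated by `state` and that the streaming/rejected variants expose `output`/`error` payloads as their type names suggest:

```python
import asyncio

from vellum import PromptDeploymentInputRequest_String  # assumed field shape: name/value
from vellum.client import AsyncVellum


async def main() -> None:
    client = AsyncVellum(api_key="YOUR_VELLUM_API_KEY")  # placeholder key
    async for event in client.execute_prompt_stream(
        prompt_deployment_name="my-deployment",
        inputs=[PromptDeploymentInputRequest_String(name="question", value="Stream this")],
    ):
        # Assumed discriminant values: INITIATED / STREAMING / FULFILLED / REJECTED.
        if event.state == "STREAMING":
            print(event.output)  # assumed payload field on StreamingExecutePromptEvent
        elif event.state == "REJECTED":
            print("rejected:", event.error)  # assumed field on RejectedExecutePromptEvent


asyncio.run(main())
```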
@@ -553,7 +879,7 @@ class AsyncVellum:
         if _response.status_code == 400:
             raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 403:
-            raise ForbiddenError(pydantic.parse_obj_as(GenerateErrorResponse, _response.json()))  # type: ignore
+            raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 404:
             raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 500:
@@ -610,7 +936,7 @@ class AsyncVellum:
         if _response.status_code == 400:
             raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 403:
-            raise ForbiddenError(pydantic.parse_obj_as(GenerateErrorResponse, _response.json()))  # type: ignore
+            raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 404:
             raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 500:
@@ -16,7 +16,7 @@ class BaseClientWrapper:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "vellum-ai",
-            "X-Fern-SDK-Version": "0.1.8",
+            "X-Fern-SDK-Version": "v0.1.10",
         }
         headers["X_API_KEY"] = self.api_key
         return headers
@@ -1,9 +1,10 @@
 # This file was auto-generated by Fern from our API Definition.
 
+import typing
+
 from ..core.api_error import ApiError
-from ..types.generate_error_response import GenerateErrorResponse
 
 
 class ForbiddenError(ApiError):
-    def __init__(self, body: GenerateErrorResponse):
+    def __init__(self, body: typing.Any):
         super().__init__(status_code=403, body=body)
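Because `ForbiddenError.body` is now `typing.Any` instead of a parsed `GenerateErrorResponse` (matching the `parse_obj_as(typing.Any, ...)` call sites above), handlers should treat the body as raw JSON. A small sketch, assuming the 403 payload is a dict with a `detail` key, which this diff does not guarantee:

```python
from vellum import ForbiddenError
from vellum.client import Vellum

client = Vellum(api_key="YOUR_VELLUM_API_KEY")  # placeholder key
try:
    client.execute_prompt(prompt_deployment_name="my-deployment", inputs=[])
except ForbiddenError as exc:
    # exc.body is untyped JSON now; inspect it defensively.
    detail = exc.body.get("detail") if isinstance(exc.body, dict) else exc.body
    print("403 Forbidden:", detail)
```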
@@ -63,6 +63,7 @@ class RegisteredPromptsClient:
         * `HOSTED` - Hosted
         * `MOSAICML` - MosaicML
         * `OPENAI` - OpenAI
+        * `FIREWORKS_AI` - Fireworks AI
         * `HUGGINGFACE` - HuggingFace
         * `MYSTIC` - Mystic
         * `PYQ` - Pyq
@@ -146,6 +147,7 @@ class AsyncRegisteredPromptsClient:
         * `HOSTED` - Hosted
         * `MOSAICML` - MosaicML
         * `OPENAI` - OpenAI
+        * `FIREWORKS_AI` - Fireworks AI
         * `HUGGINGFACE` - HuggingFace
         * `MYSTIC` - Mystic
         * `PYQ` - Pyq