vellum-ai 0.1.3__tar.gz → 0.1.5__tar.gz

Files changed (205)
  1. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/PKG-INFO +1 -1
  2. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/pyproject.toml +1 -1
  3. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/__init__.py +88 -0
  4. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/client.py +118 -7
  5. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/core/client_wrapper.py +1 -1
  6. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/errors/forbidden_error.py +3 -2
  7. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/resources/test_suites/client.py +25 -27
  8. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/__init__.py +96 -0
  9. vellum_ai-0.1.5/src/vellum/types/chat_history_input_request.py +30 -0
  10. vellum_ai-0.1.5/src/vellum/types/error_execute_prompt_response.py +30 -0
  11. vellum_ai-0.1.5/src/vellum/types/execute_prompt_api_error_response.py +28 -0
  12. vellum_ai-0.1.5/src/vellum/types/execute_prompt_response.py +43 -0
  13. vellum_ai-0.1.5/src/vellum/types/json_execute_prompt_response.py +29 -0
  14. vellum_ai-0.1.5/src/vellum/types/json_input_request.py +29 -0
  15. vellum_ai-0.1.5/src/vellum/types/prompt_deployment_input_request.py +43 -0
  16. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/register_prompt_model_parameters_request.py +1 -1
  17. vellum_ai-0.1.5/src/vellum/types/search_result_document_request.py +34 -0
  18. vellum_ai-0.1.5/src/vellum/types/search_result_request.py +34 -0
  19. vellum_ai-0.1.5/src/vellum/types/string_execute_prompt_response.py +29 -0
  20. vellum_ai-0.1.5/src/vellum/types/string_input_request.py +29 -0
  21. vellum_ai-0.1.5/src/vellum/types/test_case_chat_history_variable_value.py +30 -0
  22. vellum_ai-0.1.5/src/vellum/types/test_case_chat_history_variable_value_request.py +30 -0
  23. vellum_ai-0.1.5/src/vellum/types/test_case_error_variable_value.py +30 -0
  24. vellum_ai-0.1.5/src/vellum/types/test_case_error_variable_value_request.py +30 -0
  25. vellum_ai-0.1.5/src/vellum/types/test_case_json_variable_value.py +29 -0
  26. vellum_ai-0.1.5/src/vellum/types/test_case_json_variable_value_request.py +29 -0
  27. vellum_ai-0.1.5/src/vellum/types/test_case_number_variable_value.py +29 -0
  28. vellum_ai-0.1.5/src/vellum/types/test_case_number_variable_value_request.py +29 -0
  29. vellum_ai-0.1.5/src/vellum/types/test_case_search_results_variable_value.py +30 -0
  30. vellum_ai-0.1.5/src/vellum/types/test_case_search_results_variable_value_request.py +30 -0
  31. vellum_ai-0.1.5/src/vellum/types/test_case_string_variable_value.py +29 -0
  32. vellum_ai-0.1.5/src/vellum/types/test_case_string_variable_value_request.py +29 -0
  33. vellum_ai-0.1.5/src/vellum/types/test_case_variable_value.py +78 -0
  34. vellum_ai-0.1.5/src/vellum/types/test_case_variable_value_request.py +78 -0
  35. vellum_ai-0.1.5/src/vellum/types/test_suite_test_case.py +32 -0
  36. vellum_ai-0.1.5/src/vellum/types/vellum_error_request.py +30 -0
  37. vellum_ai-0.1.3/src/vellum/types/test_suite_test_case.py +0 -38
  38. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/README.md +0 -0
  39. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/core/__init__.py +0 -0
  40. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/core/api_error.py +0 -0
  41. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/core/datetime_utils.py +0 -0
  42. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/core/jsonable_encoder.py +0 -0
  43. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/core/remove_none_from_dict.py +0 -0
  44. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/environment.py +0 -0
  45. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/errors/__init__.py +0 -0
  46. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/errors/bad_request_error.py +0 -0
  47. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/errors/conflict_error.py +0 -0
  48. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/errors/internal_server_error.py +0 -0
  49. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/errors/not_found_error.py +0 -0
  50. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/py.typed +0 -0
  51. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/resources/__init__.py +0 -0
  52. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/resources/deployments/__init__.py +0 -0
  53. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/resources/deployments/client.py +0 -0
  54. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/resources/document_indexes/__init__.py +0 -0
  55. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/resources/document_indexes/client.py +0 -0
  56. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/resources/documents/__init__.py +0 -0
  57. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/resources/documents/client.py +0 -0
  58. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/resources/model_versions/__init__.py +0 -0
  59. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/resources/model_versions/client.py +0 -0
  60. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/resources/registered_prompts/__init__.py +0 -0
  61. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/resources/registered_prompts/client.py +0 -0
  62. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/resources/sandboxes/__init__.py +0 -0
  63. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/resources/sandboxes/client.py +0 -0
  64. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/resources/test_suites/__init__.py +0 -0
  65. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/api_node_result.py +0 -0
  66. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/api_node_result_data.py +0 -0
  67. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/block_type_enum.py +0 -0
  68. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/chat_message.py +0 -0
  69. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/chat_message_request.py +0 -0
  70. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/chat_message_role.py +0 -0
  71. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/conditional_node_result.py +0 -0
  72. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/conditional_node_result_data.py +0 -0
  73. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/deployment_read.py +0 -0
  74. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/deployment_status.py +0 -0
  75. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/document_document_to_document_index.py +0 -0
  76. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/document_index_read.py +0 -0
  77. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/document_index_status.py +0 -0
  78. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/document_read.py +0 -0
  79. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/document_status.py +0 -0
  80. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/enriched_normalized_completion.py +0 -0
  81. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/environment_enum.py +0 -0
  82. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/evaluation_params.py +0 -0
  83. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/evaluation_params_request.py +0 -0
  84. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/execute_workflow_stream_error_response.py +0 -0
  85. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/finish_reason_enum.py +0 -0
  86. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/generate_error_response.py +0 -0
  87. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/generate_options_request.py +0 -0
  88. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/generate_request.py +0 -0
  89. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/generate_response.py +0 -0
  90. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/generate_result.py +0 -0
  91. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/generate_result_data.py +0 -0
  92. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/generate_result_error.py +0 -0
  93. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/generate_stream_response.py +0 -0
  94. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/generate_stream_result.py +0 -0
  95. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/generate_stream_result_data.py +0 -0
  96. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/indexing_state_enum.py +0 -0
  97. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/logical_operator.py +0 -0
  98. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/logprobs_enum.py +0 -0
  99. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/metadata_filter_config_request.py +0 -0
  100. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/metadata_filter_rule_combinator.py +0 -0
  101. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/metadata_filter_rule_request.py +0 -0
  102. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/model_version_build_config.py +0 -0
  103. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/model_version_exec_config.py +0 -0
  104. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/model_version_exec_config_parameters.py +0 -0
  105. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/model_version_read.py +0 -0
  106. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/model_version_read_status_enum.py +0 -0
  107. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/model_version_sandbox_snapshot.py +0 -0
  108. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/node_input_compiled_chat_history_value.py +0 -0
  109. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/node_input_compiled_error_value.py +0 -0
  110. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/node_input_compiled_json_value.py +0 -0
  111. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/node_input_compiled_number_value.py +0 -0
  112. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/node_input_compiled_search_results_value.py +0 -0
  113. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/node_input_compiled_string_value.py +0 -0
  114. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/node_input_variable_compiled_value.py +0 -0
  115. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/normalized_log_probs.py +0 -0
  116. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/normalized_token_log_probs.py +0 -0
  117. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/paginated_slim_document_list.py +0 -0
  118. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/processing_failure_reason_enum.py +0 -0
  119. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/processing_state_enum.py +0 -0
  120. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/prompt_node_result.py +0 -0
  121. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/prompt_node_result_data.py +0 -0
  122. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/prompt_template_block.py +0 -0
  123. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/prompt_template_block_data.py +0 -0
  124. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/prompt_template_block_data_request.py +0 -0
  125. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/prompt_template_block_properties.py +0 -0
  126. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/prompt_template_block_properties_request.py +0 -0
  127. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/prompt_template_block_request.py +0 -0
  128. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/provider_enum.py +0 -0
  129. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/register_prompt_error_response.py +0 -0
  130. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/register_prompt_prompt.py +0 -0
  131. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/register_prompt_prompt_info_request.py +0 -0
  132. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/register_prompt_response.py +0 -0
  133. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/registered_prompt_deployment.py +0 -0
  134. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/registered_prompt_input_variable_request.py +0 -0
  135. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/registered_prompt_model_version.py +0 -0
  136. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/registered_prompt_sandbox.py +0 -0
  137. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/registered_prompt_sandbox_snapshot.py +0 -0
  138. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/sandbox_metric_input_params.py +0 -0
  139. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/sandbox_metric_input_params_request.py +0 -0
  140. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/sandbox_scenario.py +0 -0
  141. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/scenario_input.py +0 -0
  142. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/scenario_input_request.py +0 -0
  143. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/scenario_input_type_enum.py +0 -0
  144. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/search_error_response.py +0 -0
  145. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/search_filters_request.py +0 -0
  146. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/search_node_result.py +0 -0
  147. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/search_node_result_data.py +0 -0
  148. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/search_request_options_request.py +0 -0
  149. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/search_response.py +0 -0
  150. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/search_result.py +0 -0
  151. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/search_result_document.py +0 -0
  152. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/search_result_merging_request.py +0 -0
  153. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/search_weights_request.py +0 -0
  154. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/slim_document.py +0 -0
  155. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/submit_completion_actual_request.py +0 -0
  156. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/submit_completion_actuals_error_response.py +0 -0
  157. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/submit_workflow_execution_actual_request.py +0 -0
  158. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/templating_node_chat_history_result.py +0 -0
  159. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/templating_node_error_result.py +0 -0
  160. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/templating_node_json_result.py +0 -0
  161. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/templating_node_number_result.py +0 -0
  162. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/templating_node_result.py +0 -0
  163. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/templating_node_result_data.py +0 -0
  164. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/templating_node_result_output.py +0 -0
  165. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/templating_node_search_results_result.py +0 -0
  166. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/templating_node_string_result.py +0 -0
  167. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/terminal_node_chat_history_result.py +0 -0
  168. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/terminal_node_error_result.py +0 -0
  169. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/terminal_node_json_result.py +0 -0
  170. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/terminal_node_number_result.py +0 -0
  171. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/terminal_node_result.py +0 -0
  172. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/terminal_node_result_data.py +0 -0
  173. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/terminal_node_result_output.py +0 -0
  174. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/terminal_node_search_results_result.py +0 -0
  175. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/terminal_node_string_result.py +0 -0
  176. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/upload_document_error_response.py +0 -0
  177. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/upload_document_response.py +0 -0
  178. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/vellum_error.py +0 -0
  179. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/vellum_error_code_enum.py +0 -0
  180. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/vellum_variable.py +0 -0
  181. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/vellum_variable_type.py +0 -0
  182. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_event_error.py +0 -0
  183. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_execution_actual_chat_history_request.py +0 -0
  184. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_execution_actual_json_request.py +0 -0
  185. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_execution_actual_string_request.py +0 -0
  186. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_execution_event_error_code.py +0 -0
  187. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_execution_event_type.py +0 -0
  188. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_execution_node_result_event.py +0 -0
  189. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_execution_workflow_result_event.py +0 -0
  190. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_node_result_data.py +0 -0
  191. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_node_result_event.py +0 -0
  192. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_node_result_event_state.py +0 -0
  193. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_request_chat_history_input_request.py +0 -0
  194. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_request_input_request.py +0 -0
  195. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_request_json_input_request.py +0 -0
  196. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_request_string_input_request.py +0 -0
  197. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_result_event.py +0 -0
  198. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_result_event_output_data.py +0 -0
  199. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_result_event_output_data_chat_history.py +0 -0
  200. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_result_event_output_data_error.py +0 -0
  201. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_result_event_output_data_json.py +0 -0
  202. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_result_event_output_data_number.py +0 -0
  203. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_result_event_output_data_search_results.py +0 -0
  204. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_result_event_output_data_string.py +0 -0
  205. {vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/types/workflow_stream_event.py +0 -0
{vellum_ai-0.1.3 → vellum_ai-0.1.5}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: vellum-ai
- Version: 0.1.3
+ Version: 0.1.5
  Summary:
  Requires-Python: >=3.7,<4.0
  Classifier: Programming Language :: Python :: 3
{vellum_ai-0.1.3 → vellum_ai-0.1.5}/pyproject.toml
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "vellum-ai"
- version = "v0.1.3"
+ version = "v0.1.5"
  description = ""
  readme = "README.md"
  authors = []
{vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/__init__.py
@@ -4,6 +4,7 @@ from .types import (
      ApiNodeResult,
      ApiNodeResultData,
      BlockTypeEnum,
+     ChatHistoryInputRequest,
      ChatMessage,
      ChatMessageRequest,
      ChatMessageRole,
@@ -18,8 +19,14 @@
      DocumentStatus,
      EnrichedNormalizedCompletion,
      EnvironmentEnum,
+     ErrorExecutePromptResponse,
      EvaluationParams,
      EvaluationParamsRequest,
+     ExecutePromptApiErrorResponse,
+     ExecutePromptResponse,
+     ExecutePromptResponse_Error,
+     ExecutePromptResponse_Json,
+     ExecutePromptResponse_String,
      ExecuteWorkflowStreamErrorResponse,
      FinishReasonEnum,
      GenerateErrorResponse,
@@ -33,6 +40,8 @@
      GenerateStreamResult,
      GenerateStreamResultData,
      IndexingStateEnum,
+     JsonExecutePromptResponse,
+     JsonInputRequest,
      LogicalOperator,
      LogprobsEnum,
      MetadataFilterConfigRequest,
@@ -62,6 +71,10 @@
      PaginatedSlimDocumentList,
      ProcessingFailureReasonEnum,
      ProcessingStateEnum,
+     PromptDeploymentInputRequest,
+     PromptDeploymentInputRequest_ChatHistory,
+     PromptDeploymentInputRequest_Json,
+     PromptDeploymentInputRequest_String,
      PromptNodeResult,
      PromptNodeResultData,
      PromptTemplateBlock,
@@ -95,9 +108,13 @@
      SearchResponse,
      SearchResult,
      SearchResultDocument,
+     SearchResultDocumentRequest,
      SearchResultMergingRequest,
+     SearchResultRequest,
      SearchWeightsRequest,
      SlimDocument,
+     StringExecutePromptResponse,
+     StringInputRequest,
      SubmitCompletionActualRequest,
      SubmitCompletionActualsErrorResponse,
      SubmitWorkflowExecutionActualRequest,
@@ -134,11 +151,38 @@
      TerminalNodeResultOutput_String,
      TerminalNodeSearchResultsResult,
      TerminalNodeStringResult,
+     TestCaseChatHistoryVariableValue,
+     TestCaseChatHistoryVariableValueRequest,
+     TestCaseErrorVariableValue,
+     TestCaseErrorVariableValueRequest,
+     TestCaseJsonVariableValue,
+     TestCaseJsonVariableValueRequest,
+     TestCaseNumberVariableValue,
+     TestCaseNumberVariableValueRequest,
+     TestCaseSearchResultsVariableValue,
+     TestCaseSearchResultsVariableValueRequest,
+     TestCaseStringVariableValue,
+     TestCaseStringVariableValueRequest,
+     TestCaseVariableValue,
+     TestCaseVariableValueRequest,
+     TestCaseVariableValueRequest_ChatHistory,
+     TestCaseVariableValueRequest_Error,
+     TestCaseVariableValueRequest_Json,
+     TestCaseVariableValueRequest_Number,
+     TestCaseVariableValueRequest_SearchResults,
+     TestCaseVariableValueRequest_String,
+     TestCaseVariableValue_ChatHistory,
+     TestCaseVariableValue_Error,
+     TestCaseVariableValue_Json,
+     TestCaseVariableValue_Number,
+     TestCaseVariableValue_SearchResults,
+     TestCaseVariableValue_String,
      TestSuiteTestCase,
      UploadDocumentErrorResponse,
      UploadDocumentResponse,
      VellumError,
      VellumErrorCodeEnum,
+     VellumErrorRequest,
      VellumVariable,
      VellumVariableType,
      WorkflowEventError,
@@ -200,6 +244,7 @@ __all__ = [
      "ApiNodeResultData",
      "BadRequestError",
      "BlockTypeEnum",
+     "ChatHistoryInputRequest",
      "ChatMessage",
      "ChatMessageRequest",
      "ChatMessageRole",
@@ -215,8 +260,14 @@
      "DocumentStatus",
      "EnrichedNormalizedCompletion",
      "EnvironmentEnum",
+     "ErrorExecutePromptResponse",
      "EvaluationParams",
      "EvaluationParamsRequest",
+     "ExecutePromptApiErrorResponse",
+     "ExecutePromptResponse",
+     "ExecutePromptResponse_Error",
+     "ExecutePromptResponse_Json",
+     "ExecutePromptResponse_String",
      "ExecuteWorkflowStreamErrorResponse",
      "FinishReasonEnum",
      "ForbiddenError",
@@ -232,6 +283,8 @@
      "GenerateStreamResultData",
      "IndexingStateEnum",
      "InternalServerError",
+     "JsonExecutePromptResponse",
+     "JsonInputRequest",
      "LogicalOperator",
      "LogprobsEnum",
      "MetadataFilterConfigRequest",
@@ -262,6 +315,10 @@
      "PaginatedSlimDocumentList",
      "ProcessingFailureReasonEnum",
      "ProcessingStateEnum",
+     "PromptDeploymentInputRequest",
+     "PromptDeploymentInputRequest_ChatHistory",
+     "PromptDeploymentInputRequest_Json",
+     "PromptDeploymentInputRequest_String",
      "PromptNodeResult",
      "PromptNodeResultData",
      "PromptTemplateBlock",
@@ -295,9 +352,13 @@
      "SearchResponse",
      "SearchResult",
      "SearchResultDocument",
+     "SearchResultDocumentRequest",
      "SearchResultMergingRequest",
+     "SearchResultRequest",
      "SearchWeightsRequest",
      "SlimDocument",
+     "StringExecutePromptResponse",
+     "StringInputRequest",
      "SubmitCompletionActualRequest",
      "SubmitCompletionActualsErrorResponse",
      "SubmitWorkflowExecutionActualRequest",
@@ -334,12 +395,39 @@
      "TerminalNodeResultOutput_String",
      "TerminalNodeSearchResultsResult",
      "TerminalNodeStringResult",
+     "TestCaseChatHistoryVariableValue",
+     "TestCaseChatHistoryVariableValueRequest",
+     "TestCaseErrorVariableValue",
+     "TestCaseErrorVariableValueRequest",
+     "TestCaseJsonVariableValue",
+     "TestCaseJsonVariableValueRequest",
+     "TestCaseNumberVariableValue",
+     "TestCaseNumberVariableValueRequest",
+     "TestCaseSearchResultsVariableValue",
+     "TestCaseSearchResultsVariableValueRequest",
+     "TestCaseStringVariableValue",
+     "TestCaseStringVariableValueRequest",
+     "TestCaseVariableValue",
+     "TestCaseVariableValueRequest",
+     "TestCaseVariableValueRequest_ChatHistory",
+     "TestCaseVariableValueRequest_Error",
+     "TestCaseVariableValueRequest_Json",
+     "TestCaseVariableValueRequest_Number",
+     "TestCaseVariableValueRequest_SearchResults",
+     "TestCaseVariableValueRequest_String",
+     "TestCaseVariableValue_ChatHistory",
+     "TestCaseVariableValue_Error",
+     "TestCaseVariableValue_Json",
+     "TestCaseVariableValue_Number",
+     "TestCaseVariableValue_SearchResults",
+     "TestCaseVariableValue_String",
      "TestSuiteTestCase",
      "UploadDocumentErrorResponse",
      "UploadDocumentResponse",
      "VellumEnvironment",
      "VellumError",
      "VellumErrorCodeEnum",
+     "VellumErrorRequest",
      "VellumVariable",
      "VellumVariableType",
      "WorkflowEventError",
{vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/client.py
@@ -22,11 +22,12 @@ from .resources.model_versions.client import AsyncModelVersionsClient, ModelVers
  from .resources.registered_prompts.client import AsyncRegisteredPromptsClient, RegisteredPromptsClient
  from .resources.sandboxes.client import AsyncSandboxesClient, SandboxesClient
  from .resources.test_suites.client import AsyncTestSuitesClient, TestSuitesClient
- from .types.generate_error_response import GenerateErrorResponse
+ from .types.execute_prompt_response import ExecutePromptResponse
  from .types.generate_options_request import GenerateOptionsRequest
  from .types.generate_request import GenerateRequest
  from .types.generate_response import GenerateResponse
  from .types.generate_stream_response import GenerateStreamResponse
+ from .types.prompt_deployment_input_request import PromptDeploymentInputRequest
  from .types.search_request_options_request import SearchRequestOptionsRequest
  from .types.search_response import SearchResponse
  from .types.submit_completion_actual_request import SubmitCompletionActualRequest
@@ -66,6 +67,61 @@ class Vellum:
          self.sandboxes = SandboxesClient(client_wrapper=self._client_wrapper)
          self.test_suites = TestSuitesClient(client_wrapper=self._client_wrapper)

+     def execute_prompt(
+         self,
+         *,
+         inputs: typing.List[PromptDeploymentInputRequest],
+         prompt_deployment_id: typing.Optional[str] = OMIT,
+         prompt_deployment_name: typing.Optional[str] = OMIT,
+         release_tag: typing.Optional[str] = OMIT,
+         external_id: typing.Optional[str] = OMIT,
+     ) -> ExecutePromptResponse:
+         """
+         Executes a deployed Prompt and returns the result.
+
+         Parameters:
+         - inputs: typing.List[PromptDeploymentInputRequest].
+
+         - prompt_deployment_id: typing.Optional[str]. The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.
+
+         - prompt_deployment_name: typing.Optional[str]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+
+         - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Prompt Deployment
+
+         - external_id: typing.Optional[str].
+         """
+         _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
+         if prompt_deployment_id is not OMIT:
+             _request["prompt_deployment_id"] = prompt_deployment_id
+         if prompt_deployment_name is not OMIT:
+             _request["prompt_deployment_name"] = prompt_deployment_name
+         if release_tag is not OMIT:
+             _request["release_tag"] = release_tag
+         if external_id is not OMIT:
+             _request["external_id"] = external_id
+         _response = self._client_wrapper.httpx_client.request(
+             "POST",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", "v1/execute-prompt"),
+             json=jsonable_encoder(_request),
+             headers=self._client_wrapper.get_headers(),
+             timeout=None,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(ExecutePromptResponse, _response.json())  # type: ignore
+         if _response.status_code == 400:
+             raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+         if _response.status_code == 403:
+             raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+         if _response.status_code == 404:
+             raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+         if _response.status_code == 500:
+             raise InternalServerError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
      def execute_workflow_stream(
          self,
          *,
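
For context, a minimal usage sketch of the new execute_prompt method added above. The deployment name, the input field names (name, value), and the response attributes used below are assumptions inferred from the union types this release exports (PromptDeploymentInputRequest_String, ExecutePromptResponse_String), not details spelled out in this diff:

    import os

    from vellum import PromptDeploymentInputRequest_String
    from vellum.client import Vellum

    client = Vellum(api_key=os.environ["VELLUM_API_KEY"])

    # Execute a deployed Prompt by name; release_tag optionally pins a specific release.
    response = client.execute_prompt(
        prompt_deployment_name="my-prompt-deployment",  # hypothetical deployment name
        release_tag="production",
        inputs=[
            PromptDeploymentInputRequest_String(
                type="STRING",
                name="query",            # assumed field names for the STRING input variant
                value="Hello, world",
            )
        ],
    )

    # ExecutePromptResponse is a discriminated union (STRING / JSON / ERROR);
    # check the discriminator before reading the payload.
    if response.type == "STRING":
        print(response.value)            # assumed payload attribute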
@@ -86,7 +142,7 @@

          - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Workflow Deployment

-         - inputs: typing.List[WorkflowRequestInputRequest].
+         - inputs: typing.List[WorkflowRequestInputRequest]. The list of inputs defined in the Workflow's deployment with their corresponding values.

          - external_id: typing.Optional[str]. Optionally include a unique identifier for tracking purposes.

@@ -187,7 +243,7 @@
          if _response.status_code == 400:
              raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
          if _response.status_code == 403:
-             raise ForbiddenError(pydantic.parse_obj_as(GenerateErrorResponse, _response.json()))  # type: ignore
+             raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
          if _response.status_code == 404:
              raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
          if _response.status_code == 500:
@@ -244,7 +300,7 @@
          if _response.status_code == 400:
              raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
          if _response.status_code == 403:
-             raise ForbiddenError(pydantic.parse_obj_as(GenerateErrorResponse, _response.json()))  # type: ignore
+             raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
          if _response.status_code == 404:
              raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
          if _response.status_code == 500:
@@ -432,6 +488,61 @@ class AsyncVellum:
          self.sandboxes = AsyncSandboxesClient(client_wrapper=self._client_wrapper)
          self.test_suites = AsyncTestSuitesClient(client_wrapper=self._client_wrapper)

+     async def execute_prompt(
+         self,
+         *,
+         inputs: typing.List[PromptDeploymentInputRequest],
+         prompt_deployment_id: typing.Optional[str] = OMIT,
+         prompt_deployment_name: typing.Optional[str] = OMIT,
+         release_tag: typing.Optional[str] = OMIT,
+         external_id: typing.Optional[str] = OMIT,
+     ) -> ExecutePromptResponse:
+         """
+         Executes a deployed Prompt and returns the result.
+
+         Parameters:
+         - inputs: typing.List[PromptDeploymentInputRequest].
+
+         - prompt_deployment_id: typing.Optional[str]. The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.
+
+         - prompt_deployment_name: typing.Optional[str]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+
+         - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Prompt Deployment
+
+         - external_id: typing.Optional[str].
+         """
+         _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
+         if prompt_deployment_id is not OMIT:
+             _request["prompt_deployment_id"] = prompt_deployment_id
+         if prompt_deployment_name is not OMIT:
+             _request["prompt_deployment_name"] = prompt_deployment_name
+         if release_tag is not OMIT:
+             _request["release_tag"] = release_tag
+         if external_id is not OMIT:
+             _request["external_id"] = external_id
+         _response = await self._client_wrapper.httpx_client.request(
+             "POST",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", "v1/execute-prompt"),
+             json=jsonable_encoder(_request),
+             headers=self._client_wrapper.get_headers(),
+             timeout=None,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(ExecutePromptResponse, _response.json())  # type: ignore
+         if _response.status_code == 400:
+             raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+         if _response.status_code == 403:
+             raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+         if _response.status_code == 404:
+             raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+         if _response.status_code == 500:
+             raise InternalServerError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
      async def execute_workflow_stream(
          self,
          *,
@@ -452,7 +563,7 @@

          - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Workflow Deployment

-         - inputs: typing.List[WorkflowRequestInputRequest].
+         - inputs: typing.List[WorkflowRequestInputRequest]. The list of inputs defined in the Workflow's deployment with their corresponding values.

          - external_id: typing.Optional[str]. Optionally include a unique identifier for tracking purposes.

@@ -553,7 +664,7 @@
          if _response.status_code == 400:
              raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
          if _response.status_code == 403:
-             raise ForbiddenError(pydantic.parse_obj_as(GenerateErrorResponse, _response.json()))  # type: ignore
+             raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
          if _response.status_code == 404:
              raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
          if _response.status_code == 500:
@@ -610,7 +721,7 @@
          if _response.status_code == 400:
              raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
          if _response.status_code == 403:
-             raise ForbiddenError(pydantic.parse_obj_as(GenerateErrorResponse, _response.json()))  # type: ignore
+             raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
          if _response.status_code == 404:
              raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
          if _response.status_code == 500:
{vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/core/client_wrapper.py
@@ -16,7 +16,7 @@ class BaseClientWrapper:
          headers: typing.Dict[str, str] = {
              "X-Fern-Language": "Python",
              "X-Fern-SDK-Name": "vellum-ai",
-             "X-Fern-SDK-Version": "v0.1.3",
+             "X-Fern-SDK-Version": "v0.1.5",
          }
          headers["X_API_KEY"] = self.api_key
          return headers
{vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/errors/forbidden_error.py
@@ -1,9 +1,10 @@
  # This file was auto-generated by Fern from our API Definition.

+ import typing
+
  from ..core.api_error import ApiError
- from ..types.generate_error_response import GenerateErrorResponse


  class ForbiddenError(ApiError):
-     def __init__(self, body: GenerateErrorResponse):
+     def __init__(self, body: typing.Any):
          super().__init__(status_code=403, body=body)
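
Because ForbiddenError now carries a typing.Any body rather than a GenerateErrorResponse, callers that previously read typed fields off the exception should treat the body as an opaque payload. A hedged sketch of the adjusted handling; the search call and its arguments are illustrative, not taken from this diff:

    from vellum.client import Vellum
    from vellum.core.api_error import ApiError
    from vellum.errors import ForbiddenError

    client = Vellum(api_key="YOUR_API_KEY")

    try:
        results = client.search(index_name="my-index", query="hello")  # hypothetical index name
    except ForbiddenError as exc:
        # exc.body is now typed as typing.Any; it is typically the raw JSON error payload.
        print("Forbidden:", exc.body)
    except ApiError as exc:
        # Fallback for any other non-2xx response surfaced by the generated client.
        print(exc.status_code, exc.body)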
{vellum_ai-0.1.3 → vellum_ai-0.1.5}/src/vellum/resources/test_suites/client.py
@@ -7,7 +7,7 @@ from json.decoder import JSONDecodeError
  from ...core.api_error import ApiError
  from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
  from ...core.jsonable_encoder import jsonable_encoder
- from ...types.evaluation_params_request import EvaluationParamsRequest
+ from ...types.test_case_variable_value_request import TestCaseVariableValueRequest
  from ...types.test_suite_test_case import TestSuiteTestCase

  try:
@@ -27,10 +27,10 @@ class TestSuitesClient:
          self,
          id: str,
          *,
-         test_case_id: typing.Optional[str] = OMIT,
+         test_suite_test_case_request_id: typing.Optional[str] = OMIT,
          label: typing.Optional[str] = OMIT,
-         input_values: typing.Dict[str, typing.Any],
-         evaluation_params: EvaluationParamsRequest,
+         input_values: typing.List[TestCaseVariableValueRequest],
+         evaluation_values: typing.List[TestCaseVariableValueRequest],
      ) -> TestSuiteTestCase:
          """
          Upserts a new test case for a test suite, keying off of the optionally provided test case id.
@@ -44,15 +44,14 @@
          Parameters:
          - id: str. A UUID string identifying this test suite.

-         - test_case_id: typing.Optional[str]. The id of the test case to update. If none is provided, an id will be generated and a new test case will be appended.
+         - test_suite_test_case_request_id: typing.Optional[str].

-         - label: typing.Optional[str]. A human-friendly label for the test case.
+         - label: typing.Optional[str].

-         - input_values: typing.Dict[str, typing.Any]. Key/value pairs for each input variable that the Test Suite expects.
+         - input_values: typing.List[TestCaseVariableValueRequest].

-         - evaluation_params: EvaluationParamsRequest. Parameters to use when evaluating the test case, specific to the test suite's evaluation metric.
+         - evaluation_values: typing.List[TestCaseVariableValueRequest].
          ---
-         from vellum import EvaluationParamsRequest
          from vellum.client import Vellum

          client = Vellum(
@@ -60,13 +59,13 @@
          )
          client.test_suites.upsert_test_suite_test_case(
              id="id",
-             input_values={},
-             evaluation_params=EvaluationParamsRequest(),
+             input_values=[],
+             evaluation_values=[],
          )
          """
-         _request: typing.Dict[str, typing.Any] = {"input_values": input_values, "evaluation_params": evaluation_params}
-         if test_case_id is not OMIT:
-             _request["test_case_id"] = test_case_id
+         _request: typing.Dict[str, typing.Any] = {"input_values": input_values, "evaluation_values": evaluation_values}
+         if test_suite_test_case_request_id is not OMIT:
+             _request["id"] = test_suite_test_case_request_id
          if label is not OMIT:
              _request["label"] = label
          _response = self._client_wrapper.httpx_client.request(
@@ -130,10 +129,10 @@ class AsyncTestSuitesClient:
          self,
          id: str,
          *,
-         test_case_id: typing.Optional[str] = OMIT,
+         test_suite_test_case_request_id: typing.Optional[str] = OMIT,
          label: typing.Optional[str] = OMIT,
-         input_values: typing.Dict[str, typing.Any],
-         evaluation_params: EvaluationParamsRequest,
+         input_values: typing.List[TestCaseVariableValueRequest],
+         evaluation_values: typing.List[TestCaseVariableValueRequest],
      ) -> TestSuiteTestCase:
          """
          Upserts a new test case for a test suite, keying off of the optionally provided test case id.
@@ -147,15 +146,14 @@
          Parameters:
          - id: str. A UUID string identifying this test suite.

-         - test_case_id: typing.Optional[str]. The id of the test case to update. If none is provided, an id will be generated and a new test case will be appended.
+         - test_suite_test_case_request_id: typing.Optional[str].

-         - label: typing.Optional[str]. A human-friendly label for the test case.
+         - label: typing.Optional[str].

-         - input_values: typing.Dict[str, typing.Any]. Key/value pairs for each input variable that the Test Suite expects.
+         - input_values: typing.List[TestCaseVariableValueRequest].

-         - evaluation_params: EvaluationParamsRequest. Parameters to use when evaluating the test case, specific to the test suite's evaluation metric.
+         - evaluation_values: typing.List[TestCaseVariableValueRequest].
          ---
-         from vellum import EvaluationParamsRequest
          from vellum.client import AsyncVellum

          client = AsyncVellum(
@@ -163,13 +161,13 @@
          )
          await client.test_suites.upsert_test_suite_test_case(
              id="id",
-             input_values={},
-             evaluation_params=EvaluationParamsRequest(),
+             input_values=[],
+             evaluation_values=[],
          )
          """
-         _request: typing.Dict[str, typing.Any] = {"input_values": input_values, "evaluation_params": evaluation_params}
-         if test_case_id is not OMIT:
-             _request["test_case_id"] = test_case_id
+         _request: typing.Dict[str, typing.Any] = {"input_values": input_values, "evaluation_values": evaluation_values}
+         if test_suite_test_case_request_id is not OMIT:
+             _request["id"] = test_suite_test_case_request_id
          if label is not OMIT:
              _request["label"] = label
          _response = await self._client_wrapper.httpx_client.request(
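
Finally, upsert_test_suite_test_case now takes lists of typed variable values (input_values, evaluation_values) instead of a dict plus EvaluationParamsRequest. A sketch of the new call shape; the name/value fields on the STRING variant are assumed by analogy with the other Fern-generated request models rather than shown in this diff:

    from vellum import TestCaseVariableValueRequest_String
    from vellum.client import Vellum

    client = Vellum(api_key="YOUR_API_KEY")

    client.test_suites.upsert_test_suite_test_case(
        id="00000000-0000-0000-0000-000000000000",  # UUID of the target test suite (placeholder)
        label="Greeting case",
        input_values=[
            # The value the test case feeds into the prompt or workflow under test.
            TestCaseVariableValueRequest_String(type="STRING", name="query", value="Hi there"),
        ],
        evaluation_values=[
            # The value the evaluation metric compares the output against.
            TestCaseVariableValueRequest_String(type="STRING", name="expected", value="Hello!"),
        ],
    )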