vellum-ai 0.3.23__py3-none-any.whl → 0.5.0__py3-none-any.whl

Files changed (327)
  1. vellum/__init__.py +37 -60
  2. vellum/client.py +139 -137
  3. vellum/core/__init__.py +2 -0
  4. vellum/core/client_wrapper.py +1 -1
  5. vellum/core/jsonable_encoder.py +5 -9
  6. vellum/core/pydantic_utilities.py +12 -0
  7. vellum/errors/__init__.py +1 -2
  8. vellum/lib/__init__.py +5 -0
  9. vellum/lib/test_suites/__init__.py +5 -0
  10. vellum/lib/test_suites/constants.py +2 -0
  11. vellum/lib/test_suites/exceptions.py +2 -0
  12. vellum/lib/test_suites/resources.py +253 -0
  13. vellum/lib/utils/__init__.py +0 -0
  14. vellum/lib/utils/env.py +11 -0
  15. vellum/lib/utils/exceptions.py +2 -0
  16. vellum/lib/utils/paginator.py +28 -0
  17. vellum/resources/__init__.py +0 -4
  18. vellum/resources/deployments/client.py +27 -31
  19. vellum/resources/deployments/types/deployments_list_request_status.py +1 -13
  20. vellum/resources/document_indexes/client.py +35 -39
  21. vellum/resources/document_indexes/types/document_indexes_list_request_status.py +1 -13
  22. vellum/resources/documents/client.py +131 -35
  23. vellum/resources/folder_entities/client.py +4 -4
  24. vellum/resources/sandboxes/client.py +18 -46
  25. vellum/resources/test_suite_runs/client.py +19 -23
  26. vellum/resources/test_suites/client.py +17 -21
  27. vellum/resources/workflow_deployments/client.py +13 -17
  28. vellum/resources/workflow_deployments/types/workflow_deployments_list_request_status.py +1 -13
  29. vellum/types/__init__.py +37 -55
  30. vellum/types/api_node_result.py +3 -7
  31. vellum/types/api_node_result_data.py +4 -8
  32. vellum/types/array_chat_message_content.py +3 -7
  33. vellum/types/array_chat_message_content_request.py +3 -7
  34. vellum/types/chat_history_input_request.py +4 -8
  35. vellum/types/chat_history_variable_value.py +3 -7
  36. vellum/types/chat_message.py +4 -8
  37. vellum/types/chat_message_request.py +4 -8
  38. vellum/types/chat_message_role.py +1 -32
  39. vellum/types/code_execution_node_array_result.py +3 -7
  40. vellum/types/code_execution_node_chat_history_result.py +3 -7
  41. vellum/types/code_execution_node_error_result.py +3 -7
  42. vellum/types/code_execution_node_function_call_result.py +3 -7
  43. vellum/types/code_execution_node_json_result.py +3 -7
  44. vellum/types/code_execution_node_number_result.py +3 -7
  45. vellum/types/code_execution_node_result.py +3 -7
  46. vellum/types/code_execution_node_result_data.py +3 -7
  47. vellum/types/code_execution_node_search_results_result.py +3 -7
  48. vellum/types/code_execution_node_string_result.py +3 -7
  49. vellum/types/conditional_node_result.py +3 -7
  50. vellum/types/conditional_node_result_data.py +3 -7
  51. vellum/types/deployment_provider_payload_response.py +3 -7
  52. vellum/types/deployment_read.py +8 -12
  53. vellum/types/document_document_to_document_index.py +6 -10
  54. vellum/types/document_index_read.py +8 -12
  55. vellum/types/document_read.py +8 -12
  56. vellum/types/enriched_normalized_completion.py +9 -13
  57. vellum/types/entity_status.py +1 -18
  58. vellum/types/environment_enum.py +1 -27
  59. vellum/types/error_variable_value.py +3 -7
  60. vellum/types/execute_prompt_api_error_response.py +4 -8
  61. vellum/types/execute_workflow_error_response.py +4 -8
  62. vellum/types/execute_workflow_response.py +3 -7
  63. vellum/types/execute_workflow_stream_error_response.py +4 -8
  64. vellum/types/execution_array_vellum_value.py +4 -8
  65. vellum/types/execution_chat_history_vellum_value.py +4 -8
  66. vellum/types/execution_error_vellum_value.py +4 -8
  67. vellum/types/execution_function_call_vellum_value.py +4 -8
  68. vellum/types/execution_json_vellum_value.py +4 -8
  69. vellum/types/execution_number_vellum_value.py +4 -8
  70. vellum/types/execution_search_results_vellum_value.py +4 -8
  71. vellum/types/execution_string_vellum_value.py +4 -8
  72. vellum/types/external_test_case_execution.py +4 -8
  73. vellum/types/external_test_case_execution_request.py +4 -8
  74. vellum/types/finish_reason_enum.py +1 -27
  75. vellum/types/fulfilled_execute_prompt_event.py +3 -7
  76. vellum/types/fulfilled_execute_prompt_response.py +5 -9
  77. vellum/types/fulfilled_execute_workflow_workflow_result_event.py +3 -7
  78. vellum/types/fulfilled_function_call.py +3 -7
  79. vellum/types/{registered_prompt_input_variable_request.py → fulfilled_function_call_request.py} +10 -9
  80. vellum/types/fulfilled_prompt_execution_meta.py +3 -7
  81. vellum/types/fulfilled_workflow_node_result_event.py +3 -7
  82. vellum/types/function_call_chat_message_content.py +3 -7
  83. vellum/types/function_call_chat_message_content_request.py +3 -7
  84. vellum/types/function_call_chat_message_content_value.py +3 -7
  85. vellum/types/function_call_chat_message_content_value_request.py +3 -7
  86. vellum/types/function_call_variable_value.py +3 -7
  87. vellum/types/generate_error_response.py +4 -8
  88. vellum/types/generate_options_request.py +4 -8
  89. vellum/types/generate_request.py +6 -10
  90. vellum/types/generate_result.py +5 -9
  91. vellum/types/generate_result_data.py +4 -8
  92. vellum/types/generate_result_error.py +4 -8
  93. vellum/types/generate_stream_response.py +3 -7
  94. vellum/types/generate_stream_result.py +3 -7
  95. vellum/types/generate_stream_result_data.py +3 -7
  96. vellum/types/image_chat_message_content.py +3 -7
  97. vellum/types/image_chat_message_content_request.py +3 -7
  98. vellum/types/image_variable_value.py +3 -7
  99. vellum/types/indexing_state_enum.py +3 -37
  100. vellum/types/initiated_execute_prompt_event.py +3 -7
  101. vellum/types/initiated_prompt_execution_meta.py +3 -7
  102. vellum/types/initiated_workflow_node_result_event.py +3 -7
  103. vellum/types/json_input_request.py +4 -8
  104. vellum/types/json_variable_value.py +3 -7
  105. vellum/types/logical_operator.py +23 -173
  106. vellum/types/logprobs_enum.py +1 -18
  107. vellum/types/metadata_filter_config_request.py +3 -7
  108. vellum/types/metadata_filter_rule_combinator.py +1 -18
  109. vellum/types/metadata_filter_rule_request.py +3 -7
  110. vellum/types/ml_model_usage.py +3 -7
  111. vellum/types/named_scenario_input_chat_history_variable_value_request.py +31 -0
  112. vellum/types/named_scenario_input_request.py +31 -0
  113. vellum/types/{registered_prompt_sandbox_snapshot.py → named_scenario_input_string_variable_value_request.py} +7 -9
  114. vellum/types/named_test_case_chat_history_variable_value.py +3 -7
  115. vellum/types/named_test_case_chat_history_variable_value_request.py +3 -7
  116. vellum/types/named_test_case_error_variable_value.py +3 -7
  117. vellum/types/named_test_case_error_variable_value_request.py +3 -7
  118. vellum/types/{register_prompt_prompt.py → named_test_case_function_call_variable_value.py} +7 -13
  119. vellum/types/named_test_case_function_call_variable_value_request.py +31 -0
  120. vellum/types/named_test_case_json_variable_value.py +3 -7
  121. vellum/types/named_test_case_json_variable_value_request.py +3 -7
  122. vellum/types/named_test_case_number_variable_value.py +3 -7
  123. vellum/types/named_test_case_number_variable_value_request.py +3 -7
  124. vellum/types/named_test_case_search_results_variable_value.py +3 -7
  125. vellum/types/named_test_case_search_results_variable_value_request.py +3 -7
  126. vellum/types/named_test_case_string_variable_value.py +3 -7
  127. vellum/types/named_test_case_string_variable_value_request.py +3 -7
  128. vellum/types/named_test_case_variable_value.py +12 -0
  129. vellum/types/named_test_case_variable_value_request.py +12 -0
  130. vellum/types/node_input_compiled_array_value.py +3 -7
  131. vellum/types/node_input_compiled_chat_history_value.py +3 -7
  132. vellum/types/node_input_compiled_error_value.py +3 -7
  133. vellum/types/node_input_compiled_function_call.py +3 -7
  134. vellum/types/node_input_compiled_json_value.py +3 -7
  135. vellum/types/node_input_compiled_number_value.py +3 -7
  136. vellum/types/node_input_compiled_search_results_value.py +3 -7
  137. vellum/types/node_input_compiled_string_value.py +3 -7
  138. vellum/types/node_output_compiled_array_value.py +3 -7
  139. vellum/types/node_output_compiled_chat_history_value.py +3 -7
  140. vellum/types/node_output_compiled_error_value.py +3 -7
  141. vellum/types/node_output_compiled_function_value.py +3 -7
  142. vellum/types/node_output_compiled_json_value.py +3 -7
  143. vellum/types/node_output_compiled_number_value.py +3 -7
  144. vellum/types/node_output_compiled_search_results_value.py +3 -7
  145. vellum/types/node_output_compiled_string_value.py +3 -7
  146. vellum/types/normalized_log_probs.py +3 -7
  147. vellum/types/normalized_token_log_probs.py +3 -7
  148. vellum/types/number_variable_value.py +3 -7
  149. vellum/types/paginated_document_index_read_list.py +3 -7
  150. vellum/types/paginated_slim_deployment_read_list.py +3 -7
  151. vellum/types/paginated_slim_document_list.py +3 -7
  152. vellum/types/paginated_slim_workflow_deployment_list.py +3 -7
  153. vellum/types/paginated_test_suite_run_execution_list.py +3 -7
  154. vellum/types/paginated_test_suite_test_case_list.py +3 -7
  155. vellum/types/processing_failure_reason_enum.py +1 -20
  156. vellum/types/processing_state_enum.py +1 -32
  157. vellum/types/prompt_deployment_expand_meta_request_request.py +9 -13
  158. vellum/types/prompt_execution_meta.py +3 -7
  159. vellum/types/prompt_node_result.py +3 -7
  160. vellum/types/prompt_node_result_data.py +3 -7
  161. vellum/types/raw_prompt_execution_overrides_request.py +5 -9
  162. vellum/types/rejected_execute_prompt_event.py +3 -7
  163. vellum/types/rejected_execute_prompt_response.py +5 -9
  164. vellum/types/rejected_execute_workflow_workflow_result_event.py +3 -7
  165. vellum/types/rejected_function_call.py +3 -7
  166. vellum/types/rejected_prompt_execution_meta.py +3 -7
  167. vellum/types/rejected_workflow_node_result_event.py +3 -7
  168. vellum/types/sandbox_scenario.py +5 -9
  169. vellum/types/scenario_input.py +18 -21
  170. vellum/types/{registered_prompt_sandbox.py → scenario_input_chat_history_variable_value.py} +7 -13
  171. vellum/types/{register_prompt_error_response.py → scenario_input_string_variable_value.py} +7 -9
  172. vellum/types/search_error_response.py +4 -8
  173. vellum/types/search_filters_request.py +5 -9
  174. vellum/types/search_node_result.py +3 -7
  175. vellum/types/search_node_result_data.py +4 -8
  176. vellum/types/search_request_options_request.py +7 -11
  177. vellum/types/search_response.py +4 -8
  178. vellum/types/search_result.py +6 -10
  179. vellum/types/search_result_document.py +7 -11
  180. vellum/types/search_result_document_request.py +6 -10
  181. vellum/types/search_result_merging_request.py +4 -8
  182. vellum/types/search_result_request.py +6 -10
  183. vellum/types/search_results_variable_value.py +3 -7
  184. vellum/types/search_weights_request.py +5 -9
  185. vellum/types/slim_deployment_read.py +7 -11
  186. vellum/types/slim_document.py +12 -16
  187. vellum/types/slim_workflow_deployment.py +9 -13
  188. vellum/types/streaming_execute_prompt_event.py +4 -8
  189. vellum/types/streaming_prompt_execution_meta.py +3 -7
  190. vellum/types/streaming_workflow_node_result_event.py +3 -7
  191. vellum/types/string_chat_message_content.py +3 -7
  192. vellum/types/string_chat_message_content_request.py +3 -7
  193. vellum/types/string_input_request.py +4 -8
  194. vellum/types/string_variable_value.py +3 -7
  195. vellum/types/submit_completion_actual_request.py +8 -12
  196. vellum/types/submit_completion_actuals_error_response.py +3 -7
  197. vellum/types/subworkflow_node_result.py +3 -7
  198. vellum/types/templating_node_array_result.py +3 -7
  199. vellum/types/templating_node_chat_history_result.py +3 -7
  200. vellum/types/templating_node_error_result.py +3 -7
  201. vellum/types/templating_node_function_call_result.py +3 -7
  202. vellum/types/templating_node_json_result.py +3 -7
  203. vellum/types/templating_node_number_result.py +3 -7
  204. vellum/types/templating_node_result.py +3 -7
  205. vellum/types/templating_node_result_data.py +3 -7
  206. vellum/types/templating_node_search_results_result.py +3 -7
  207. vellum/types/templating_node_string_result.py +3 -7
  208. vellum/types/terminal_node_array_result.py +4 -8
  209. vellum/types/terminal_node_chat_history_result.py +4 -8
  210. vellum/types/terminal_node_error_result.py +4 -8
  211. vellum/types/terminal_node_function_call_result.py +4 -8
  212. vellum/types/terminal_node_json_result.py +4 -8
  213. vellum/types/terminal_node_number_result.py +4 -8
  214. vellum/types/terminal_node_result.py +3 -7
  215. vellum/types/terminal_node_result_data.py +3 -7
  216. vellum/types/terminal_node_search_results_result.py +4 -8
  217. vellum/types/terminal_node_string_result.py +4 -8
  218. vellum/types/test_case_chat_history_variable_value.py +3 -7
  219. vellum/types/test_case_error_variable_value.py +3 -7
  220. vellum/types/test_case_function_call_variable_value.py +32 -0
  221. vellum/types/test_case_json_variable_value.py +3 -7
  222. vellum/types/test_case_number_variable_value.py +3 -7
  223. vellum/types/test_case_search_results_variable_value.py +3 -7
  224. vellum/types/test_case_string_variable_value.py +3 -7
  225. vellum/types/test_case_variable_value.py +12 -0
  226. vellum/types/test_suite_run_deployment_release_tag_exec_config.py +4 -8
  227. vellum/types/test_suite_run_deployment_release_tag_exec_config_data.py +5 -9
  228. vellum/types/test_suite_run_deployment_release_tag_exec_config_data_request.py +5 -9
  229. vellum/types/test_suite_run_deployment_release_tag_exec_config_request.py +4 -8
  230. vellum/types/test_suite_run_execution.py +3 -7
  231. vellum/types/test_suite_run_execution_chat_history_output.py +3 -7
  232. vellum/types/test_suite_run_execution_error_output.py +3 -7
  233. vellum/types/{registered_prompt_model_version.py → test_suite_run_execution_function_call_output.py} +8 -13
  234. vellum/types/test_suite_run_execution_json_output.py +3 -7
  235. vellum/types/test_suite_run_execution_metric_definition.py +3 -7
  236. vellum/types/test_suite_run_execution_metric_result.py +3 -7
  237. vellum/types/test_suite_run_execution_number_output.py +3 -7
  238. vellum/types/test_suite_run_execution_output.py +12 -0
  239. vellum/types/test_suite_run_execution_search_results_output.py +3 -7
  240. vellum/types/test_suite_run_execution_string_output.py +3 -7
  241. vellum/types/test_suite_run_external_exec_config.py +4 -8
  242. vellum/types/test_suite_run_external_exec_config_data.py +4 -8
  243. vellum/types/test_suite_run_external_exec_config_data_request.py +4 -8
  244. vellum/types/test_suite_run_external_exec_config_request.py +4 -8
  245. vellum/types/test_suite_run_metric_error_output.py +3 -7
  246. vellum/types/test_suite_run_metric_number_output.py +3 -7
  247. vellum/types/test_suite_run_metric_string_output.py +3 -7
  248. vellum/types/test_suite_run_read.py +5 -9
  249. vellum/types/test_suite_run_state.py +1 -37
  250. vellum/types/test_suite_run_test_suite.py +3 -7
  251. vellum/types/test_suite_run_workflow_release_tag_exec_config.py +4 -8
  252. vellum/types/test_suite_run_workflow_release_tag_exec_config_data.py +5 -9
  253. vellum/types/test_suite_run_workflow_release_tag_exec_config_data_request.py +5 -9
  254. vellum/types/test_suite_run_workflow_release_tag_exec_config_request.py +4 -8
  255. vellum/types/test_suite_test_case.py +3 -7
  256. vellum/types/upload_document_error_response.py +3 -7
  257. vellum/types/upload_document_response.py +4 -8
  258. vellum/types/vellum_error.py +3 -7
  259. vellum/types/vellum_error_code_enum.py +3 -32
  260. vellum/types/vellum_error_request.py +3 -7
  261. vellum/types/vellum_image.py +3 -7
  262. vellum/types/vellum_image_request.py +3 -7
  263. vellum/types/vellum_variable.py +3 -7
  264. vellum/types/vellum_variable_type.py +6 -57
  265. vellum/types/workflow_deployment_read.py +9 -13
  266. vellum/types/workflow_event_error.py +3 -7
  267. vellum/types/workflow_execution_actual_chat_history_request.py +8 -12
  268. vellum/types/workflow_execution_actual_json_request.py +8 -12
  269. vellum/types/workflow_execution_actual_string_request.py +8 -12
  270. vellum/types/workflow_execution_event_error_code.py +13 -52
  271. vellum/types/workflow_execution_event_type.py +1 -18
  272. vellum/types/workflow_execution_node_result_event.py +3 -7
  273. vellum/types/workflow_execution_workflow_result_event.py +3 -7
  274. vellum/types/workflow_node_result_event_state.py +3 -32
  275. vellum/types/workflow_output_array.py +4 -8
  276. vellum/types/workflow_output_chat_history.py +4 -8
  277. vellum/types/workflow_output_error.py +4 -8
  278. vellum/types/workflow_output_function_call.py +4 -8
  279. vellum/types/workflow_output_image.py +4 -8
  280. vellum/types/workflow_output_json.py +4 -8
  281. vellum/types/workflow_output_number.py +4 -8
  282. vellum/types/workflow_output_search_results.py +4 -8
  283. vellum/types/workflow_output_string.py +4 -8
  284. vellum/types/workflow_request_chat_history_input_request.py +4 -8
  285. vellum/types/workflow_request_json_input_request.py +4 -8
  286. vellum/types/workflow_request_number_input_request.py +4 -8
  287. vellum/types/workflow_request_string_input_request.py +4 -8
  288. vellum/types/workflow_result_event.py +3 -7
  289. vellum/types/workflow_result_event_output_data_array.py +4 -8
  290. vellum/types/workflow_result_event_output_data_chat_history.py +4 -8
  291. vellum/types/workflow_result_event_output_data_error.py +4 -8
  292. vellum/types/workflow_result_event_output_data_function_call.py +4 -8
  293. vellum/types/workflow_result_event_output_data_json.py +4 -8
  294. vellum/types/workflow_result_event_output_data_number.py +4 -8
  295. vellum/types/workflow_result_event_output_data_search_results.py +4 -8
  296. vellum/types/workflow_result_event_output_data_string.py +5 -9
  297. {vellum_ai-0.3.23.dist-info → vellum_ai-0.5.0.dist-info}/METADATA +1 -2
  298. vellum_ai-0.5.0.dist-info/RECORD +382 -0
  299. vellum/errors/conflict_error.py +0 -9
  300. vellum/resources/model_versions/__init__.py +0 -2
  301. vellum/resources/model_versions/client.py +0 -123
  302. vellum/resources/registered_prompts/__init__.py +0 -2
  303. vellum/resources/registered_prompts/client.py +0 -327
  304. vellum/types/block_type_enum.py +0 -36
  305. vellum/types/model_version_build_config.py +0 -40
  306. vellum/types/model_version_exec_config.py +0 -48
  307. vellum/types/model_version_exec_config_parameters.py +0 -37
  308. vellum/types/model_version_read.py +0 -82
  309. vellum/types/model_version_read_status_enum.py +0 -36
  310. vellum/types/model_version_sandbox_snapshot.py +0 -44
  311. vellum/types/prompt_template_block.py +0 -41
  312. vellum/types/prompt_template_block_data.py +0 -31
  313. vellum/types/prompt_template_block_data_request.py +0 -31
  314. vellum/types/prompt_template_block_properties.py +0 -47
  315. vellum/types/prompt_template_block_properties_request.py +0 -47
  316. vellum/types/prompt_template_block_request.py +0 -41
  317. vellum/types/prompt_template_block_state.py +0 -22
  318. vellum/types/provider_enum.py +0 -81
  319. vellum/types/register_prompt_model_parameters_request.py +0 -37
  320. vellum/types/register_prompt_prompt_info_request.py +0 -35
  321. vellum/types/register_prompt_response.py +0 -62
  322. vellum/types/registered_prompt_deployment.py +0 -42
  323. vellum/types/scenario_input_request.py +0 -34
  324. vellum/types/scenario_input_type_enum.py +0 -22
  325. vellum_ai-0.3.23.dist-info/RECORD +0 -394
  326. {vellum_ai-0.3.23.dist-info → vellum_ai-0.5.0.dist-info}/LICENSE +0 -0
  327. {vellum_ai-0.3.23.dist-info → vellum_ai-0.5.0.dist-info}/WHEEL +0 -0

vellum/resources/registered_prompts/client.py (deleted)
@@ -1,327 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import typing
- import urllib.parse
- from json.decoder import JSONDecodeError
-
- from ...core.api_error import ApiError
- from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
- from ...core.jsonable_encoder import jsonable_encoder
- from ...core.remove_none_from_dict import remove_none_from_dict
- from ...core.request_options import RequestOptions
- from ...errors.bad_request_error import BadRequestError
- from ...errors.conflict_error import ConflictError
- from ...errors.not_found_error import NotFoundError
- from ...types.provider_enum import ProviderEnum
- from ...types.register_prompt_error_response import RegisterPromptErrorResponse
- from ...types.register_prompt_model_parameters_request import RegisterPromptModelParametersRequest
- from ...types.register_prompt_prompt_info_request import RegisterPromptPromptInfoRequest
- from ...types.register_prompt_response import RegisterPromptResponse
-
- try:
- import pydantic.v1 as pydantic # type: ignore
- except ImportError:
- import pydantic # type: ignore
-
- # this is used as the default value for optional parameters
- OMIT = typing.cast(typing.Any, ...)
-
-
- class RegisteredPromptsClient:
- def __init__(self, *, client_wrapper: SyncClientWrapper):
- self._client_wrapper = client_wrapper
-
- def register_prompt(
- self,
- *,
- label: str,
- name: str,
- prompt: RegisterPromptPromptInfoRequest,
- provider: typing.Optional[ProviderEnum] = OMIT,
- model: str,
- parameters: RegisterPromptModelParametersRequest,
- meta: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- request_options: typing.Optional[RequestOptions] = None,
- ) -> RegisterPromptResponse:
- """
- Registers a prompt within Vellum and creates associated Vellum entities. Intended to be used by integration
- partners, not directly by Vellum users.
-
- Under the hood, this endpoint creates a new sandbox, a new model version, and a new deployment.
-
- Parameters:
- - label: str. A human-friendly label for corresponding entities created in Vellum.
-
- - name: str. A uniquely-identifying name for corresponding entities created in Vellum.
-
- - prompt: RegisterPromptPromptInfoRequest. Information about how to execute the prompt template.
-
- - provider: typing.Optional[ProviderEnum]. The initial LLM provider to use for this prompt
-
- * `ANTHROPIC` - Anthropic
- * `AWS_BEDROCK` - AWS Bedrock
- * `AZURE_OPENAI` - Azure OpenAI
- * `COHERE` - Cohere
- * `GOOGLE` - Google
- * `HOSTED` - Hosted
- * `MOSAICML` - MosaicML
- * `OPENAI` - OpenAI
- * `FIREWORKS_AI` - Fireworks AI
- * `HUGGINGFACE` - HuggingFace
- * `MYSTIC` - Mystic
- * `PYQ` - Pyq
- * `REPLICATE` - Replicate
- - model: str. The initial model to use for this prompt
-
- - parameters: RegisterPromptModelParametersRequest. The initial model parameters to use for this prompt
-
- - meta: typing.Optional[typing.Dict[str, typing.Any]]. Optionally include additional metadata to store along with the prompt.
-
- - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
- ---
- from vellum import (
- BlockTypeEnum,
- PromptTemplateBlockDataRequest,
- PromptTemplateBlockPropertiesRequest,
- PromptTemplateBlockRequest,
- RegisteredPromptInputVariableRequest,
- RegisterPromptModelParametersRequest,
- RegisterPromptPromptInfoRequest,
- )
- from vellum.client import Vellum
-
- client = Vellum(
- api_key="YOUR_API_KEY",
- )
- client.registered_prompts.register_prompt(
- label="label",
- name="name",
- prompt=RegisterPromptPromptInfoRequest(
- prompt_block_data=PromptTemplateBlockDataRequest(
- version=1,
- blocks=[
- PromptTemplateBlockRequest(
- id="id",
- block_type=BlockTypeEnum.CHAT_MESSAGE,
- properties=PromptTemplateBlockPropertiesRequest(),
- )
- ],
- ),
- input_variables=[
- RegisteredPromptInputVariableRequest(
- key="key",
- )
- ],
- ),
- model="model",
- parameters=RegisterPromptModelParametersRequest(
- temperature=1.1,
- max_tokens=1,
- top_p=1.1,
- frequency_penalty=1.1,
- presence_penalty=1.1,
- ),
- )
- """
- _request: typing.Dict[str, typing.Any] = {
- "label": label,
- "name": name,
- "prompt": prompt,
- "model": model,
- "parameters": parameters,
- }
- if provider is not OMIT:
- _request["provider"] = provider
- if meta is not OMIT:
- _request["meta"] = meta
- _response = self._client_wrapper.httpx_client.request(
- "POST",
- urllib.parse.urljoin(
- f"{self._client_wrapper.get_environment().default}/", "v1/registered-prompts/register"
- ),
- params=jsonable_encoder(
- request_options.get("additional_query_parameters") if request_options is not None else None
- ),
- json=jsonable_encoder(_request)
- if request_options is None or request_options.get("additional_body_parameters") is None
- else {
- **jsonable_encoder(_request),
- **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
- },
- headers=jsonable_encoder(
- remove_none_from_dict(
- {
- **self._client_wrapper.get_headers(),
- **(request_options.get("additional_headers", {}) if request_options is not None else {}),
- }
- )
- ),
- timeout=request_options.get("timeout_in_seconds")
- if request_options is not None and request_options.get("timeout_in_seconds") is not None
- else self._client_wrapper.get_timeout(),
- retries=0,
- max_retries=request_options.get("max_retries") if request_options is not None else 0, # type: ignore
- )
- if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(RegisterPromptResponse, _response.json()) # type: ignore
- if _response.status_code == 400:
- raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
- if _response.status_code == 404:
- raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
- if _response.status_code == 409:
- raise ConflictError(pydantic.parse_obj_as(RegisterPromptErrorResponse, _response.json())) # type: ignore
- try:
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
-
-
- class AsyncRegisteredPromptsClient:
- def __init__(self, *, client_wrapper: AsyncClientWrapper):
- self._client_wrapper = client_wrapper
-
- async def register_prompt(
- self,
- *,
- label: str,
- name: str,
- prompt: RegisterPromptPromptInfoRequest,
- provider: typing.Optional[ProviderEnum] = OMIT,
- model: str,
- parameters: RegisterPromptModelParametersRequest,
- meta: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
- request_options: typing.Optional[RequestOptions] = None,
- ) -> RegisterPromptResponse:
- """
- Registers a prompt within Vellum and creates associated Vellum entities. Intended to be used by integration
- partners, not directly by Vellum users.
-
- Under the hood, this endpoint creates a new sandbox, a new model version, and a new deployment.
-
- Parameters:
- - label: str. A human-friendly label for corresponding entities created in Vellum.
-
- - name: str. A uniquely-identifying name for corresponding entities created in Vellum.
-
- - prompt: RegisterPromptPromptInfoRequest. Information about how to execute the prompt template.
-
- - provider: typing.Optional[ProviderEnum]. The initial LLM provider to use for this prompt
-
- * `ANTHROPIC` - Anthropic
- * `AWS_BEDROCK` - AWS Bedrock
- * `AZURE_OPENAI` - Azure OpenAI
- * `COHERE` - Cohere
- * `GOOGLE` - Google
- * `HOSTED` - Hosted
- * `MOSAICML` - MosaicML
- * `OPENAI` - OpenAI
- * `FIREWORKS_AI` - Fireworks AI
- * `HUGGINGFACE` - HuggingFace
- * `MYSTIC` - Mystic
- * `PYQ` - Pyq
- * `REPLICATE` - Replicate
- - model: str. The initial model to use for this prompt
-
- - parameters: RegisterPromptModelParametersRequest. The initial model parameters to use for this prompt
-
- - meta: typing.Optional[typing.Dict[str, typing.Any]]. Optionally include additional metadata to store along with the prompt.
-
- - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
- ---
- from vellum import (
- BlockTypeEnum,
- PromptTemplateBlockDataRequest,
- PromptTemplateBlockPropertiesRequest,
- PromptTemplateBlockRequest,
- RegisteredPromptInputVariableRequest,
- RegisterPromptModelParametersRequest,
- RegisterPromptPromptInfoRequest,
- )
- from vellum.client import AsyncVellum
-
- client = AsyncVellum(
- api_key="YOUR_API_KEY",
- )
- await client.registered_prompts.register_prompt(
- label="label",
- name="name",
- prompt=RegisterPromptPromptInfoRequest(
- prompt_block_data=PromptTemplateBlockDataRequest(
- version=1,
- blocks=[
- PromptTemplateBlockRequest(
- id="id",
- block_type=BlockTypeEnum.CHAT_MESSAGE,
- properties=PromptTemplateBlockPropertiesRequest(),
- )
- ],
- ),
- input_variables=[
- RegisteredPromptInputVariableRequest(
- key="key",
- )
- ],
- ),
- model="model",
- parameters=RegisterPromptModelParametersRequest(
- temperature=1.1,
- max_tokens=1,
- top_p=1.1,
- frequency_penalty=1.1,
- presence_penalty=1.1,
- ),
- )
- """
- _request: typing.Dict[str, typing.Any] = {
- "label": label,
- "name": name,
- "prompt": prompt,
- "model": model,
- "parameters": parameters,
- }
- if provider is not OMIT:
- _request["provider"] = provider
- if meta is not OMIT:
- _request["meta"] = meta
- _response = await self._client_wrapper.httpx_client.request(
- "POST",
- urllib.parse.urljoin(
- f"{self._client_wrapper.get_environment().default}/", "v1/registered-prompts/register"
- ),
- params=jsonable_encoder(
- request_options.get("additional_query_parameters") if request_options is not None else None
- ),
- json=jsonable_encoder(_request)
- if request_options is None or request_options.get("additional_body_parameters") is None
- else {
- **jsonable_encoder(_request),
- **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
- },
- headers=jsonable_encoder(
- remove_none_from_dict(
- {
- **self._client_wrapper.get_headers(),
- **(request_options.get("additional_headers", {}) if request_options is not None else {}),
- }
- )
- ),
- timeout=request_options.get("timeout_in_seconds")
- if request_options is not None and request_options.get("timeout_in_seconds") is not None
- else self._client_wrapper.get_timeout(),
- retries=0,
- max_retries=request_options.get("max_retries") if request_options is not None else 0, # type: ignore
- )
- if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(RegisterPromptResponse, _response.json()) # type: ignore
- if _response.status_code == 400:
- raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
- if _response.status_code == 404:
- raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
- if _response.status_code == 409:
- raise ConflictError(pydantic.parse_obj_as(RegisterPromptErrorResponse, _response.json())) # type: ignore
- try:
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)

vellum/types/block_type_enum.py (deleted)
@@ -1,36 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import enum
- import typing
-
- T_Result = typing.TypeVar("T_Result")
-
-
- class BlockTypeEnum(str, enum.Enum):
- """
- - `CHAT_MESSAGE` - CHAT_MESSAGE
- - `CHAT_HISTORY` - CHAT_HISTORY
- - `JINJA` - JINJA
- - `FUNCTION_DEFINITION` - FUNCTION_DEFINITION
- """
-
- CHAT_MESSAGE = "CHAT_MESSAGE"
- CHAT_HISTORY = "CHAT_HISTORY"
- JINJA = "JINJA"
- FUNCTION_DEFINITION = "FUNCTION_DEFINITION"
-
- def visit(
- self,
- chat_message: typing.Callable[[], T_Result],
- chat_history: typing.Callable[[], T_Result],
- jinja: typing.Callable[[], T_Result],
- function_definition: typing.Callable[[], T_Result],
- ) -> T_Result:
- if self is BlockTypeEnum.CHAT_MESSAGE:
- return chat_message()
- if self is BlockTypeEnum.CHAT_HISTORY:
- return chat_history()
- if self is BlockTypeEnum.JINJA:
- return jinja()
- if self is BlockTypeEnum.FUNCTION_DEFINITION:
- return function_definition()

vellum/types/model_version_build_config.py (deleted)
@@ -1,40 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
- from .model_version_sandbox_snapshot import ModelVersionSandboxSnapshot
-
- try:
- import pydantic.v1 as pydantic # type: ignore
- except ImportError:
- import pydantic # type: ignore
-
-
- class ModelVersionBuildConfig(pydantic.BaseModel):
- base_model: str = pydantic.Field()
- """
- The name of the base model used to create this model version, as identified by the LLM provider.
- """
-
- sandbox_snapshot: typing.Optional[ModelVersionSandboxSnapshot] = pydantic.Field(default=None)
- """
- Information about the sandbox snapshot that was used to create this model version, if applicable.
- """
-
- prompt_version_id: typing.Optional[str] = None
-
- def json(self, **kwargs: typing.Any) -> str:
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().json(**kwargs_with_defaults)
-
- def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().dict(**kwargs_with_defaults)
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
- json_encoders = {dt.datetime: serialize_datetime}

vellum/types/model_version_exec_config.py (deleted)
@@ -1,48 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
- from .model_version_exec_config_parameters import ModelVersionExecConfigParameters
- from .prompt_template_block_data import PromptTemplateBlockData
- from .vellum_variable import VellumVariable
-
- try:
- import pydantic.v1 as pydantic # type: ignore
- except ImportError:
- import pydantic # type: ignore
-
-
- class ModelVersionExecConfig(pydantic.BaseModel):
- parameters: ModelVersionExecConfigParameters = pydantic.Field()
- """
- The generation parameters that are passed to the LLM provider at runtime.
- """
-
- input_variables: typing.List[VellumVariable] = pydantic.Field()
- """
- Input variables specified in the prompt template.
- """
-
- prompt_template: typing.Optional[str] = pydantic.Field(default=None)
- """
- The template used to generate prompts for this model version.
- """
-
- prompt_block_data: typing.Optional[PromptTemplateBlockData] = None
- prompt_syntax_version: typing.Optional[int] = None
-
- def json(self, **kwargs: typing.Any) -> str:
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().json(**kwargs_with_defaults)
-
- def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().dict(**kwargs_with_defaults)
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
- json_encoders = {dt.datetime: serialize_datetime}

vellum/types/model_version_exec_config_parameters.py (deleted)
@@ -1,37 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
-
- try:
- import pydantic.v1 as pydantic # type: ignore
- except ImportError:
- import pydantic # type: ignore
-
-
- class ModelVersionExecConfigParameters(pydantic.BaseModel):
- temperature: typing.Optional[float] = None
- max_tokens: typing.Optional[int] = None
- top_p: float
- frequency_penalty: float
- presence_penalty: float
- logit_bias: typing.Optional[typing.Dict[str, typing.Optional[float]]] = None
- stop: typing.Optional[typing.List[str]] = None
- top_k: typing.Optional[float] = None
- custom_parameters: typing.Optional[typing.Dict[str, typing.Any]] = None
-
- def json(self, **kwargs: typing.Any) -> str:
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().json(**kwargs_with_defaults)
-
- def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().dict(**kwargs_with_defaults)
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
- json_encoders = {dt.datetime: serialize_datetime}

vellum/types/model_version_read.py (deleted)
@@ -1,82 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
- from .model_version_build_config import ModelVersionBuildConfig
- from .model_version_exec_config import ModelVersionExecConfig
- from .model_version_read_status_enum import ModelVersionReadStatusEnum
- from .provider_enum import ProviderEnum
-
- try:
- import pydantic.v1 as pydantic # type: ignore
- except ImportError:
- import pydantic # type: ignore
-
-
- class ModelVersionRead(pydantic.BaseModel):
- id: str = pydantic.Field()
- """
- Vellum-generated ID that uniquely identifies this model version.
- """
-
- created: dt.datetime = pydantic.Field()
- """
- Timestamp of when this model version was created.
- """
-
- label: str = pydantic.Field()
- """
- Human-friendly name for this model version.
- """
-
- provider: ProviderEnum = pydantic.Field()
- """
- Which LLM provider this model version is associated with.
-
- - `ANTHROPIC` - Anthropic
- - `AWS_BEDROCK` - AWS Bedrock
- - `AZURE_OPENAI` - Azure OpenAI
- - `COHERE` - Cohere
- - `GOOGLE` - Google
- - `HOSTED` - Hosted
- - `MOSAICML` - MosaicML
- - `OPENAI` - OpenAI
- - `FIREWORKS_AI` - Fireworks AI
- - `HUGGINGFACE` - HuggingFace
- - `MYSTIC` - Mystic
- - `PYQ` - Pyq
- - `REPLICATE` - Replicate
- """
-
- external_id: str = pydantic.Field()
- """
- The unique id of this model version as it exists in the above provider's system.
- """
-
- build_config: ModelVersionBuildConfig = pydantic.Field()
- """
- Configuration used to build this model version.
- """
-
- exec_config: ModelVersionExecConfig = pydantic.Field()
- """
- Configuration used to execute this model version.
- """
-
- status: typing.Optional[ModelVersionReadStatusEnum] = None
-
- def json(self, **kwargs: typing.Any) -> str:
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().json(**kwargs_with_defaults)
-
- def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().dict(**kwargs_with_defaults)
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
- json_encoders = {dt.datetime: serialize_datetime}

vellum/types/model_version_read_status_enum.py (deleted)
@@ -1,36 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import enum
- import typing
-
- T_Result = typing.TypeVar("T_Result")
-
-
- class ModelVersionReadStatusEnum(str, enum.Enum):
- """
- - `CREATING` - Creating
- - `READY` - Ready
- - `CREATION_FAILED` - Creation Failed
- - `DISABLED` - Disabled
- """
-
- CREATING = "CREATING"
- READY = "READY"
- CREATION_FAILED = "CREATION_FAILED"
- DISABLED = "DISABLED"
-
- def visit(
- self,
- creating: typing.Callable[[], T_Result],
- ready: typing.Callable[[], T_Result],
- creation_failed: typing.Callable[[], T_Result],
- disabled: typing.Callable[[], T_Result],
- ) -> T_Result:
- if self is ModelVersionReadStatusEnum.CREATING:
- return creating()
- if self is ModelVersionReadStatusEnum.READY:
- return ready()
- if self is ModelVersionReadStatusEnum.CREATION_FAILED:
- return creation_failed()
- if self is ModelVersionReadStatusEnum.DISABLED:
- return disabled()

vellum/types/model_version_sandbox_snapshot.py (deleted)
@@ -1,44 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
-
- try:
- import pydantic.v1 as pydantic # type: ignore
- except ImportError:
- import pydantic # type: ignore
-
-
- class ModelVersionSandboxSnapshot(pydantic.BaseModel):
- id: str = pydantic.Field()
- """
- The ID of the sandbox snapshot.
- """
-
- prompt_index: typing.Optional[int] = pydantic.Field(default=None)
- """
- The index of the prompt in the sandbox snapshot.
- """
-
- prompt_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- The id of the prompt in the sandbox snapshot.
- """
-
- sandbox_id: typing.Optional[str] = None
-
- def json(self, **kwargs: typing.Any) -> str:
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().json(**kwargs_with_defaults)
-
- def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().dict(**kwargs_with_defaults)
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
- json_encoders = {dt.datetime: serialize_datetime}