vellum-ai 0.7.2__py3-none-any.whl → 0.7.5__py3-none-any.whl

Sign up to get free protection for your applications and to get access to all the features.
Files changed (483) hide show
  1. vellum/__init__.py +146 -94
  2. vellum/client.py +1051 -1097
  3. vellum/core/__init__.py +4 -1
  4. vellum/core/client_wrapper.py +7 -3
  5. vellum/core/http_client.py +365 -20
  6. vellum/core/jsonable_encoder.py +3 -0
  7. vellum/core/pydantic_utilities.py +16 -0
  8. vellum/core/query_encoder.py +33 -0
  9. vellum/core/remove_none_from_dict.py +2 -2
  10. vellum/core/request_options.py +2 -2
  11. vellum/resources/__init__.py +2 -0
  12. vellum/resources/deployments/client.py +354 -371
  13. vellum/resources/document_indexes/client.py +542 -610
  14. vellum/resources/documents/client.py +327 -399
  15. vellum/resources/folder_entities/client.py +56 -72
  16. vellum/{types/map_enum.py → resources/ml_models/__init__.py} +0 -3
  17. vellum/resources/ml_models/client.py +837 -0
  18. vellum/resources/sandboxes/client.py +204 -242
  19. vellum/resources/test_suite_runs/client.py +183 -213
  20. vellum/resources/test_suites/client.py +307 -311
  21. vellum/resources/workflow_deployments/client.py +230 -268
  22. vellum/resources/workflow_sandboxes/client.py +82 -96
  23. vellum/terraform/_jsii/vellum-ai_vellum@0.0.0.jsii.tgz +0 -0
  24. vellum/terraform/data_vellum_document_index/__init__.py +10 -10
  25. vellum/terraform/document_index/__init__.py +17 -17
  26. vellum/terraform/provider/__init__.py +57 -12
  27. vellum/terraform/versions.json +1 -1
  28. vellum/types/__init__.py +152 -96
  29. vellum/types/api_node_result.py +7 -3
  30. vellum/types/api_node_result_data.py +7 -3
  31. vellum/types/array_chat_message_content.py +7 -3
  32. vellum/types/array_chat_message_content_item.py +53 -12
  33. vellum/types/array_chat_message_content_item_request.py +53 -12
  34. vellum/types/array_chat_message_content_request.py +7 -3
  35. vellum/types/array_parameter_config.py +50 -0
  36. vellum/types/array_parameter_config_request.py +50 -0
  37. vellum/types/array_variable_value_item.py +102 -24
  38. vellum/types/array_vellum_value_item.py +102 -24
  39. vellum/types/array_vellum_value_item_request.py +102 -24
  40. vellum/types/basic_vectorizer_intfloat_multilingual_e_5_large.py +7 -3
  41. vellum/types/basic_vectorizer_intfloat_multilingual_e_5_large_request.py +7 -3
  42. vellum/types/basic_vectorizer_sentence_transformers_multi_qa_mpnet_base_cos_v_1.py +7 -3
  43. vellum/types/basic_vectorizer_sentence_transformers_multi_qa_mpnet_base_cos_v_1_request.py +7 -3
  44. vellum/types/basic_vectorizer_sentence_transformers_multi_qa_mpnet_base_dot_v_1.py +7 -3
  45. vellum/types/basic_vectorizer_sentence_transformers_multi_qa_mpnet_base_dot_v_1_request.py +7 -3
  46. vellum/types/boolean_parameter_config.py +35 -0
  47. vellum/types/boolean_parameter_config_request.py +35 -0
  48. vellum/types/chat_history_input_request.py +7 -3
  49. vellum/types/chat_message.py +7 -3
  50. vellum/types/chat_message_content.py +70 -16
  51. vellum/types/chat_message_content_request.py +70 -16
  52. vellum/types/chat_message_request.py +7 -3
  53. vellum/types/code_execution_node_array_result.py +7 -3
  54. vellum/types/code_execution_node_chat_history_result.py +7 -3
  55. vellum/types/code_execution_node_error_result.py +7 -3
  56. vellum/types/code_execution_node_function_call_result.py +7 -3
  57. vellum/types/code_execution_node_json_result.py +7 -3
  58. vellum/types/code_execution_node_number_result.py +7 -3
  59. vellum/types/code_execution_node_result.py +7 -3
  60. vellum/types/code_execution_node_result_data.py +7 -3
  61. vellum/types/code_execution_node_result_output.py +144 -32
  62. vellum/types/code_execution_node_search_results_result.py +7 -3
  63. vellum/types/code_execution_node_string_result.py +7 -3
  64. vellum/types/compile_prompt_deployment_expand_meta_request.py +42 -0
  65. vellum/types/compile_prompt_meta.py +35 -0
  66. vellum/types/conditional_node_result.py +7 -3
  67. vellum/types/conditional_node_result_data.py +7 -3
  68. vellum/types/const_parameter_config.py +35 -0
  69. vellum/types/const_parameter_config_request.py +35 -0
  70. vellum/types/create_test_suite_test_case_request.py +7 -3
  71. vellum/types/deployment_provider_payload_response.py +9 -3
  72. vellum/types/deployment_read.py +12 -3
  73. vellum/types/deployment_release_tag_deployment_history_item.py +7 -3
  74. vellum/types/deployment_release_tag_read.py +7 -3
  75. vellum/types/document_document_to_document_index.py +7 -3
  76. vellum/types/document_index_chunking.py +54 -12
  77. vellum/types/document_index_chunking_request.py +54 -12
  78. vellum/types/document_index_indexing_config.py +7 -3
  79. vellum/types/document_index_indexing_config_request.py +7 -3
  80. vellum/types/document_index_read.py +7 -3
  81. vellum/types/document_read.py +7 -3
  82. vellum/types/enriched_normalized_completion.py +7 -3
  83. vellum/types/error_variable_value.py +7 -3
  84. vellum/types/error_vellum_value.py +7 -3
  85. vellum/types/error_vellum_value_request.py +7 -3
  86. vellum/types/execute_prompt_event.py +82 -16
  87. vellum/types/execute_prompt_response.py +44 -8
  88. vellum/types/execute_workflow_response.py +7 -3
  89. vellum/types/execute_workflow_workflow_result_event.py +41 -8
  90. vellum/types/execution_array_vellum_value.py +7 -3
  91. vellum/types/execution_chat_history_vellum_value.py +7 -3
  92. vellum/types/execution_error_vellum_value.py +7 -3
  93. vellum/types/execution_function_call_vellum_value.py +7 -3
  94. vellum/types/execution_json_vellum_value.py +7 -3
  95. vellum/types/execution_number_vellum_value.py +7 -3
  96. vellum/types/execution_search_results_vellum_value.py +7 -3
  97. vellum/types/execution_string_vellum_value.py +7 -3
  98. vellum/types/execution_vellum_value.py +152 -32
  99. vellum/types/external_test_case_execution.py +7 -3
  100. vellum/types/external_test_case_execution_request.py +7 -3
  101. vellum/types/fulfilled_execute_prompt_event.py +7 -3
  102. vellum/types/fulfilled_execute_prompt_response.py +7 -3
  103. vellum/types/fulfilled_execute_workflow_workflow_result_event.py +7 -3
  104. vellum/types/fulfilled_prompt_execution_meta.py +7 -3
  105. vellum/types/fulfilled_workflow_node_result_event.py +7 -3
  106. vellum/types/function_call.py +7 -3
  107. vellum/types/function_call_chat_message_content.py +7 -3
  108. vellum/types/function_call_chat_message_content_request.py +7 -3
  109. vellum/types/function_call_chat_message_content_value.py +7 -3
  110. vellum/types/function_call_chat_message_content_value_request.py +7 -3
  111. vellum/types/function_call_request.py +7 -3
  112. vellum/types/function_call_variable_value.py +7 -3
  113. vellum/types/function_call_vellum_value.py +7 -3
  114. vellum/types/function_call_vellum_value_request.py +7 -3
  115. vellum/types/generate_options_request.py +7 -3
  116. vellum/types/generate_request.py +7 -3
  117. vellum/types/generate_result.py +7 -3
  118. vellum/types/generate_result_data.py +7 -3
  119. vellum/types/generate_result_error.py +7 -3
  120. vellum/types/generate_stream_response.py +7 -3
  121. vellum/types/generate_stream_result.py +7 -3
  122. vellum/types/generate_stream_result_data.py +7 -3
  123. vellum/types/hkunlp_instructor_xl_vectorizer.py +7 -3
  124. vellum/types/hkunlp_instructor_xl_vectorizer_request.py +7 -3
  125. vellum/types/hosted_by_enum.py +26 -0
  126. vellum/types/hugging_face_tokenizer_config.py +34 -0
  127. vellum/types/hugging_face_tokenizer_config_request.py +34 -0
  128. vellum/types/image_chat_message_content.py +7 -3
  129. vellum/types/image_chat_message_content_request.py +7 -3
  130. vellum/types/image_variable_value.py +7 -3
  131. vellum/types/image_vellum_value.py +7 -3
  132. vellum/types/image_vellum_value_request.py +7 -3
  133. vellum/types/indexing_config_vectorizer.py +117 -36
  134. vellum/types/indexing_config_vectorizer_request.py +117 -36
  135. vellum/types/initiated_execute_prompt_event.py +7 -3
  136. vellum/types/initiated_prompt_execution_meta.py +7 -3
  137. vellum/types/initiated_workflow_node_result_event.py +7 -3
  138. vellum/types/instructor_vectorizer_config.py +7 -3
  139. vellum/types/instructor_vectorizer_config_request.py +7 -3
  140. vellum/types/integer_parameter_config.py +39 -0
  141. vellum/types/integer_parameter_config_request.py +39 -0
  142. vellum/types/json_input_request.py +7 -3
  143. vellum/types/json_variable_value.py +7 -3
  144. vellum/types/json_vellum_value.py +7 -3
  145. vellum/types/json_vellum_value_request.py +7 -3
  146. vellum/types/map_node_result.py +7 -3
  147. vellum/types/map_node_result_data.py +7 -3
  148. vellum/types/merge_node_result.py +7 -3
  149. vellum/types/merge_node_result_data.py +7 -3
  150. vellum/types/metadata_filter_config_request.py +7 -3
  151. vellum/types/metadata_filter_rule_request.py +7 -3
  152. vellum/types/metric_node_result.py +7 -3
  153. vellum/types/{execute_workflow_stream_error_response.py → ml_model_build_config.py} +9 -8
  154. vellum/types/{execute_prompt_api_error_response.py → ml_model_build_config_request.py} +9 -8
  155. vellum/types/ml_model_developer.py +27 -0
  156. vellum/types/ml_model_developer_enum_value_label.py +31 -0
  157. vellum/types/ml_model_display_config_labelled.py +33 -0
  158. vellum/types/ml_model_display_config_request.py +33 -0
  159. vellum/types/ml_model_display_tag.py +7 -0
  160. vellum/types/ml_model_display_tag_enum_value_label.py +31 -0
  161. vellum/types/ml_model_exec_config.py +40 -0
  162. vellum/types/ml_model_exec_config_request.py +40 -0
  163. vellum/types/ml_model_family.py +29 -0
  164. vellum/types/ml_model_family_enum_value_label.py +31 -0
  165. vellum/types/ml_model_feature.py +18 -0
  166. vellum/types/ml_model_parameter_config.py +42 -0
  167. vellum/types/ml_model_parameter_config_request.py +42 -0
  168. vellum/types/ml_model_read.py +108 -0
  169. vellum/types/ml_model_request_authorization_config.py +30 -0
  170. vellum/types/ml_model_request_authorization_config_request.py +30 -0
  171. vellum/types/ml_model_request_authorization_config_type_enum.py +5 -0
  172. vellum/types/ml_model_request_config.py +32 -0
  173. vellum/types/ml_model_request_config_request.py +32 -0
  174. vellum/types/ml_model_response_config.py +31 -0
  175. vellum/types/ml_model_response_config_request.py +31 -0
  176. vellum/types/ml_model_tokenizer_config.py +59 -0
  177. vellum/types/ml_model_tokenizer_config_request.py +61 -0
  178. vellum/types/ml_model_usage.py +7 -3
  179. vellum/types/named_scenario_input_chat_history_variable_value_request.py +7 -3
  180. vellum/types/named_scenario_input_request.py +38 -8
  181. vellum/types/named_scenario_input_string_variable_value_request.py +7 -3
  182. vellum/types/named_test_case_array_variable_value.py +7 -3
  183. vellum/types/named_test_case_array_variable_value_request.py +7 -3
  184. vellum/types/named_test_case_chat_history_variable_value.py +7 -3
  185. vellum/types/named_test_case_chat_history_variable_value_request.py +7 -3
  186. vellum/types/named_test_case_error_variable_value.py +7 -3
  187. vellum/types/named_test_case_error_variable_value_request.py +7 -3
  188. vellum/types/named_test_case_function_call_variable_value.py +7 -3
  189. vellum/types/named_test_case_function_call_variable_value_request.py +7 -3
  190. vellum/types/named_test_case_json_variable_value.py +7 -3
  191. vellum/types/named_test_case_json_variable_value_request.py +7 -3
  192. vellum/types/named_test_case_number_variable_value.py +7 -3
  193. vellum/types/named_test_case_number_variable_value_request.py +7 -3
  194. vellum/types/named_test_case_search_results_variable_value.py +7 -3
  195. vellum/types/named_test_case_search_results_variable_value_request.py +7 -3
  196. vellum/types/named_test_case_string_variable_value.py +7 -3
  197. vellum/types/named_test_case_string_variable_value_request.py +7 -3
  198. vellum/types/named_test_case_variable_value.py +144 -32
  199. vellum/types/named_test_case_variable_value_request.py +144 -32
  200. vellum/types/node_input_compiled_array_value.py +7 -3
  201. vellum/types/node_input_compiled_chat_history_value.py +7 -3
  202. vellum/types/node_input_compiled_error_value.py +7 -3
  203. vellum/types/node_input_compiled_function_call.py +7 -3
  204. vellum/types/node_input_compiled_json_value.py +7 -3
  205. vellum/types/node_input_compiled_number_value.py +7 -3
  206. vellum/types/node_input_compiled_search_results_value.py +7 -3
  207. vellum/types/node_input_compiled_string_value.py +7 -3
  208. vellum/types/node_input_variable_compiled_value.py +152 -32
  209. vellum/types/node_output_compiled_array_value.py +7 -3
  210. vellum/types/node_output_compiled_chat_history_value.py +7 -3
  211. vellum/types/node_output_compiled_error_value.py +7 -3
  212. vellum/types/node_output_compiled_function_call_value.py +7 -3
  213. vellum/types/node_output_compiled_json_value.py +7 -3
  214. vellum/types/node_output_compiled_number_value.py +7 -3
  215. vellum/types/node_output_compiled_search_results_value.py +7 -3
  216. vellum/types/node_output_compiled_string_value.py +7 -3
  217. vellum/types/node_output_compiled_value.py +153 -32
  218. vellum/types/normalized_log_probs.py +7 -3
  219. vellum/types/normalized_token_log_probs.py +7 -3
  220. vellum/types/number_parameter_config.py +40 -0
  221. vellum/types/number_parameter_config_request.py +40 -0
  222. vellum/types/number_variable_value.py +7 -3
  223. vellum/types/number_vellum_value.py +7 -3
  224. vellum/types/number_vellum_value_request.py +7 -3
  225. vellum/types/object_parameter_config.py +49 -0
  226. vellum/types/object_parameter_config_request.py +49 -0
  227. vellum/types/one_of_parameter_config.py +44 -0
  228. vellum/types/one_of_parameter_config_request.py +44 -0
  229. vellum/types/open_ai_vectorizer_config.py +7 -3
  230. vellum/types/open_ai_vectorizer_config_request.py +7 -3
  231. vellum/types/open_ai_vectorizer_text_embedding_3_large.py +7 -3
  232. vellum/types/open_ai_vectorizer_text_embedding_3_large_request.py +7 -3
  233. vellum/types/open_ai_vectorizer_text_embedding_3_small.py +7 -3
  234. vellum/types/open_ai_vectorizer_text_embedding_3_small_request.py +7 -3
  235. vellum/types/open_ai_vectorizer_text_embedding_ada_002.py +7 -3
  236. vellum/types/open_ai_vectorizer_text_embedding_ada_002_request.py +7 -3
  237. vellum/types/paginated_document_index_read_list.py +7 -3
  238. vellum/types/paginated_ml_model_read_list.py +33 -0
  239. vellum/types/paginated_slim_deployment_read_list.py +7 -3
  240. vellum/types/paginated_slim_document_list.py +7 -3
  241. vellum/types/paginated_slim_workflow_deployment_list.py +7 -3
  242. vellum/types/paginated_test_suite_run_execution_list.py +7 -3
  243. vellum/types/paginated_test_suite_test_case_list.py +7 -3
  244. vellum/types/parameter_config.py +251 -0
  245. vellum/types/parameter_config_request.py +251 -0
  246. vellum/types/pdf_search_result_meta_source.py +7 -3
  247. vellum/types/pdf_search_result_meta_source_request.py +7 -3
  248. vellum/types/prompt_deployment_expand_meta_request_request.py +7 -3
  249. vellum/types/prompt_deployment_input_request.py +55 -12
  250. vellum/types/prompt_execution_meta.py +7 -3
  251. vellum/types/prompt_node_execution_meta.py +7 -3
  252. vellum/types/prompt_node_result.py +7 -3
  253. vellum/types/prompt_node_result_data.py +7 -3
  254. vellum/types/prompt_output.py +69 -16
  255. vellum/types/raw_prompt_execution_overrides_request.py +7 -3
  256. vellum/types/reducto_chunker_config.py +7 -3
  257. vellum/types/reducto_chunker_config_request.py +7 -3
  258. vellum/types/reducto_chunking.py +7 -3
  259. vellum/types/reducto_chunking_request.py +7 -3
  260. vellum/types/rejected_execute_prompt_event.py +7 -3
  261. vellum/types/rejected_execute_prompt_response.py +7 -3
  262. vellum/types/rejected_execute_workflow_workflow_result_event.py +7 -3
  263. vellum/types/rejected_prompt_execution_meta.py +7 -3
  264. vellum/types/rejected_workflow_node_result_event.py +7 -3
  265. vellum/types/replace_test_suite_test_case_request.py +7 -3
  266. vellum/types/sandbox_scenario.py +7 -3
  267. vellum/types/scenario_input.py +38 -8
  268. vellum/types/scenario_input_chat_history_variable_value.py +7 -3
  269. vellum/types/scenario_input_string_variable_value.py +7 -3
  270. vellum/types/search_filters_request.py +7 -3
  271. vellum/types/search_node_result.py +7 -3
  272. vellum/types/search_node_result_data.py +7 -3
  273. vellum/types/search_request_options_request.py +7 -3
  274. vellum/types/search_response.py +7 -3
  275. vellum/types/search_result.py +7 -3
  276. vellum/types/search_result_document.py +7 -3
  277. vellum/types/search_result_document_request.py +7 -3
  278. vellum/types/search_result_merging_request.py +7 -3
  279. vellum/types/search_result_meta.py +7 -3
  280. vellum/types/search_result_meta_request.py +7 -3
  281. vellum/types/search_result_meta_source.py +21 -5
  282. vellum/types/search_result_meta_source_request.py +21 -5
  283. vellum/types/search_result_request.py +7 -3
  284. vellum/types/search_weights_request.py +7 -3
  285. vellum/types/sentence_chunker_config.py +7 -3
  286. vellum/types/sentence_chunker_config_request.py +7 -3
  287. vellum/types/sentence_chunking.py +7 -3
  288. vellum/types/sentence_chunking_request.py +7 -3
  289. vellum/types/slim_deployment_read.py +11 -3
  290. vellum/types/slim_document.py +7 -3
  291. vellum/types/slim_workflow_deployment.py +12 -3
  292. vellum/types/streaming_execute_prompt_event.py +7 -3
  293. vellum/types/streaming_prompt_execution_meta.py +7 -3
  294. vellum/types/streaming_workflow_node_result_event.py +7 -3
  295. vellum/types/string_chat_message_content.py +7 -3
  296. vellum/types/string_chat_message_content_request.py +7 -3
  297. vellum/types/string_input_request.py +7 -3
  298. vellum/types/string_parameter_config.py +39 -0
  299. vellum/types/string_parameter_config_request.py +39 -0
  300. vellum/types/string_variable_value.py +7 -3
  301. vellum/types/string_vellum_value.py +7 -3
  302. vellum/types/string_vellum_value_request.py +7 -3
  303. vellum/types/submit_completion_actual_request.py +7 -3
  304. vellum/types/submit_workflow_execution_actual_request.py +67 -12
  305. vellum/types/subworkflow_node_result.py +7 -3
  306. vellum/types/subworkflow_node_result_data.py +7 -3
  307. vellum/types/templating_node_array_result.py +7 -3
  308. vellum/types/templating_node_chat_history_result.py +7 -3
  309. vellum/types/templating_node_error_result.py +7 -3
  310. vellum/types/templating_node_function_call_result.py +7 -3
  311. vellum/types/templating_node_json_result.py +7 -3
  312. vellum/types/templating_node_number_result.py +7 -3
  313. vellum/types/templating_node_result.py +7 -3
  314. vellum/types/templating_node_result_data.py +7 -3
  315. vellum/types/templating_node_result_output.py +144 -32
  316. vellum/types/templating_node_search_results_result.py +7 -3
  317. vellum/types/templating_node_string_result.py +7 -3
  318. vellum/types/terminal_node_array_result.py +7 -3
  319. vellum/types/terminal_node_chat_history_result.py +7 -3
  320. vellum/types/terminal_node_error_result.py +7 -3
  321. vellum/types/terminal_node_function_call_result.py +7 -3
  322. vellum/types/terminal_node_json_result.py +7 -3
  323. vellum/types/terminal_node_number_result.py +7 -3
  324. vellum/types/terminal_node_result.py +7 -3
  325. vellum/types/terminal_node_result_data.py +7 -3
  326. vellum/types/terminal_node_result_output.py +152 -32
  327. vellum/types/terminal_node_search_results_result.py +7 -3
  328. vellum/types/terminal_node_string_result.py +7 -3
  329. vellum/types/test_case_array_variable_value.py +7 -3
  330. vellum/types/test_case_chat_history_variable_value.py +7 -3
  331. vellum/types/test_case_error_variable_value.py +7 -3
  332. vellum/types/test_case_function_call_variable_value.py +7 -3
  333. vellum/types/test_case_json_variable_value.py +8 -4
  334. vellum/types/test_case_number_variable_value.py +7 -3
  335. vellum/types/test_case_search_results_variable_value.py +7 -3
  336. vellum/types/test_case_string_variable_value.py +7 -3
  337. vellum/types/test_case_variable_value.py +152 -32
  338. vellum/types/test_suite_run_deployment_release_tag_exec_config.py +7 -3
  339. vellum/types/test_suite_run_deployment_release_tag_exec_config_data.py +7 -3
  340. vellum/types/test_suite_run_deployment_release_tag_exec_config_data_request.py +7 -3
  341. vellum/types/test_suite_run_deployment_release_tag_exec_config_request.py +7 -3
  342. vellum/types/test_suite_run_exec_config.py +57 -12
  343. vellum/types/test_suite_run_exec_config_request.py +61 -12
  344. vellum/types/test_suite_run_execution.py +7 -3
  345. vellum/types/test_suite_run_execution_array_output.py +7 -3
  346. vellum/types/test_suite_run_execution_chat_history_output.py +7 -3
  347. vellum/types/test_suite_run_execution_error_output.py +7 -3
  348. vellum/types/test_suite_run_execution_function_call_output.py +7 -3
  349. vellum/types/test_suite_run_execution_json_output.py +7 -3
  350. vellum/types/test_suite_run_execution_metric_definition.py +7 -3
  351. vellum/types/test_suite_run_execution_metric_result.py +7 -3
  352. vellum/types/test_suite_run_execution_number_output.py +7 -3
  353. vellum/types/test_suite_run_execution_output.py +152 -32
  354. vellum/types/test_suite_run_execution_search_results_output.py +7 -3
  355. vellum/types/test_suite_run_execution_string_output.py +7 -3
  356. vellum/types/test_suite_run_external_exec_config.py +7 -3
  357. vellum/types/test_suite_run_external_exec_config_data.py +7 -3
  358. vellum/types/test_suite_run_external_exec_config_data_request.py +7 -3
  359. vellum/types/test_suite_run_external_exec_config_request.py +7 -3
  360. vellum/types/test_suite_run_metric_error_output.py +7 -3
  361. vellum/types/test_suite_run_metric_number_output.py +7 -3
  362. vellum/types/test_suite_run_metric_output.py +55 -12
  363. vellum/types/test_suite_run_metric_string_output.py +7 -3
  364. vellum/types/test_suite_run_read.py +7 -3
  365. vellum/types/test_suite_run_test_suite.py +7 -3
  366. vellum/types/test_suite_run_workflow_release_tag_exec_config.py +7 -3
  367. vellum/types/test_suite_run_workflow_release_tag_exec_config_data.py +7 -3
  368. vellum/types/test_suite_run_workflow_release_tag_exec_config_data_request.py +7 -3
  369. vellum/types/test_suite_run_workflow_release_tag_exec_config_request.py +7 -3
  370. vellum/types/test_suite_test_case.py +7 -3
  371. vellum/types/test_suite_test_case_bulk_operation_request.py +75 -16
  372. vellum/types/test_suite_test_case_bulk_result.py +74 -16
  373. vellum/types/test_suite_test_case_create_bulk_operation_request.py +7 -3
  374. vellum/types/test_suite_test_case_created_bulk_result.py +7 -3
  375. vellum/types/test_suite_test_case_created_bulk_result_data.py +7 -3
  376. vellum/types/test_suite_test_case_delete_bulk_operation_data_request.py +7 -3
  377. vellum/types/test_suite_test_case_delete_bulk_operation_request.py +7 -3
  378. vellum/types/test_suite_test_case_deleted_bulk_result.py +7 -3
  379. vellum/types/test_suite_test_case_deleted_bulk_result_data.py +7 -3
  380. vellum/types/test_suite_test_case_rejected_bulk_result.py +7 -3
  381. vellum/types/test_suite_test_case_replace_bulk_operation_request.py +7 -3
  382. vellum/types/test_suite_test_case_replaced_bulk_result.py +7 -3
  383. vellum/types/test_suite_test_case_replaced_bulk_result_data.py +7 -3
  384. vellum/types/test_suite_test_case_upsert_bulk_operation_request.py +7 -3
  385. vellum/types/{execute_workflow_error_response.py → tik_token_tokenizer_config.py} +11 -6
  386. vellum/types/{generate_error_response.py → tik_token_tokenizer_config_request.py} +11 -6
  387. vellum/types/token_overlapping_window_chunker_config.py +7 -3
  388. vellum/types/token_overlapping_window_chunker_config_request.py +7 -3
  389. vellum/types/token_overlapping_window_chunking.py +7 -3
  390. vellum/types/token_overlapping_window_chunking_request.py +7 -3
  391. vellum/types/upload_document_response.py +7 -3
  392. vellum/types/upsert_test_suite_test_case_request.py +7 -3
  393. vellum/types/vellum_error.py +7 -3
  394. vellum/types/vellum_error_request.py +7 -3
  395. vellum/types/vellum_image.py +7 -3
  396. vellum/types/vellum_image_request.py +7 -3
  397. vellum/types/vellum_variable.py +7 -3
  398. vellum/types/visibility_enum.py +5 -0
  399. vellum/types/workflow_deployment_read.py +12 -3
  400. vellum/types/workflow_event_error.py +7 -3
  401. vellum/types/workflow_execution_actual_chat_history_request.py +7 -3
  402. vellum/types/workflow_execution_actual_json_request.py +7 -3
  403. vellum/types/workflow_execution_actual_string_request.py +7 -3
  404. vellum/types/workflow_execution_node_result_event.py +7 -3
  405. vellum/types/workflow_execution_workflow_result_event.py +7 -3
  406. vellum/types/workflow_expand_meta_request.py +7 -3
  407. vellum/types/workflow_node_result_data.py +189 -45
  408. vellum/types/workflow_node_result_event.py +101 -20
  409. vellum/types/workflow_output.py +171 -36
  410. vellum/types/workflow_output_array.py +7 -3
  411. vellum/types/workflow_output_chat_history.py +7 -3
  412. vellum/types/workflow_output_error.py +7 -3
  413. vellum/types/workflow_output_function_call.py +7 -3
  414. vellum/types/workflow_output_image.py +7 -3
  415. vellum/types/workflow_output_json.py +7 -3
  416. vellum/types/workflow_output_number.py +7 -3
  417. vellum/types/workflow_output_search_results.py +7 -3
  418. vellum/types/workflow_output_string.py +7 -3
  419. vellum/types/workflow_release_tag_read.py +7 -3
  420. vellum/types/workflow_release_tag_workflow_deployment_history_item.py +7 -3
  421. vellum/types/workflow_request_chat_history_input_request.py +7 -3
  422. vellum/types/workflow_request_input_request.py +72 -16
  423. vellum/types/workflow_request_json_input_request.py +7 -3
  424. vellum/types/workflow_request_number_input_request.py +7 -3
  425. vellum/types/workflow_request_string_input_request.py +7 -3
  426. vellum/types/workflow_result_event.py +7 -3
  427. vellum/types/workflow_result_event_output_data.py +178 -33
  428. vellum/types/workflow_result_event_output_data_array.py +7 -3
  429. vellum/types/workflow_result_event_output_data_chat_history.py +7 -3
  430. vellum/types/workflow_result_event_output_data_error.py +7 -3
  431. vellum/types/workflow_result_event_output_data_function_call.py +7 -3
  432. vellum/types/workflow_result_event_output_data_json.py +7 -3
  433. vellum/types/workflow_result_event_output_data_number.py +7 -3
  434. vellum/types/workflow_result_event_output_data_search_results.py +7 -3
  435. vellum/types/workflow_result_event_output_data_string.py +7 -3
  436. vellum/types/workflow_stream_event.py +43 -8
  437. {vellum_ai-0.7.2.dist-info → vellum_ai-0.7.5.dist-info}/METADATA +13 -1
  438. vellum_ai-0.7.5.dist-info/RECORD +502 -0
  439. vellum/types/array_enum.py +0 -5
  440. vellum/types/chat_history_enum.py +0 -5
  441. vellum/types/create_enum.py +0 -5
  442. vellum/types/created_enum.py +0 -5
  443. vellum/types/delete_enum.py +0 -5
  444. vellum/types/deleted_enum.py +0 -5
  445. vellum/types/error_enum.py +0 -5
  446. vellum/types/function_call_enum.py +0 -5
  447. vellum/types/hkunlp_instructor_xl_enum.py +0 -5
  448. vellum/types/image_enum.py +0 -5
  449. vellum/types/initiated_enum.py +0 -5
  450. vellum/types/intfloat_multilingual_e_5_large_enum.py +0 -5
  451. vellum/types/json_enum.py +0 -5
  452. vellum/types/merge_enum.py +0 -5
  453. vellum/types/metric_enum.py +0 -5
  454. vellum/types/number_enum.py +0 -5
  455. vellum/types/pdf_enum.py +0 -5
  456. vellum/types/reducto_chunker_enum.py +0 -5
  457. vellum/types/rejected_enum.py +0 -5
  458. vellum/types/replace_enum.py +0 -5
  459. vellum/types/replaced_enum.py +0 -5
  460. vellum/types/search_error_response.py +0 -28
  461. vellum/types/search_results_enum.py +0 -5
  462. vellum/types/sentence_chunker_enum.py +0 -5
  463. vellum/types/sentence_transformers_multi_qa_mpnet_base_cos_v_1_enum.py +0 -5
  464. vellum/types/sentence_transformers_multi_qa_mpnet_base_dot_v_1_enum.py +0 -5
  465. vellum/types/streaming_enum.py +0 -5
  466. vellum/types/string_enum.py +0 -5
  467. vellum/types/submit_completion_actuals_error_response.py +0 -25
  468. vellum/types/subworkflow_enum.py +0 -5
  469. vellum/types/test_suite_run_deployment_release_tag_exec_config_type_enum.py +0 -5
  470. vellum/types/test_suite_run_external_exec_config_type_enum.py +0 -5
  471. vellum/types/test_suite_run_metric_error_output_type_enum.py +0 -5
  472. vellum/types/test_suite_run_metric_number_output_type_enum.py +0 -5
  473. vellum/types/test_suite_run_metric_string_output_type_enum.py +0 -5
  474. vellum/types/test_suite_run_workflow_release_tag_exec_config_type_enum.py +0 -5
  475. vellum/types/text_embedding_3_large_enum.py +0 -5
  476. vellum/types/text_embedding_3_small_enum.py +0 -5
  477. vellum/types/text_embedding_ada_002_enum.py +0 -5
  478. vellum/types/token_overlapping_window_chunker_enum.py +0 -5
  479. vellum/types/upload_document_error_response.py +0 -25
  480. vellum/types/upsert_enum.py +0 -5
  481. vellum_ai-0.7.2.dist-info/RECORD +0 -494
  482. {vellum_ai-0.7.2.dist-info → vellum_ai-0.7.5.dist-info}/LICENSE +0 -0
  483. {vellum_ai-0.7.2.dist-info → vellum_ai-0.7.5.dist-info}/WHEEL +0 -0
@@ -0,0 +1,837 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+ from json.decoder import JSONDecodeError
5
+
6
+ from ...core.api_error import ApiError
7
+ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
8
+ from ...core.jsonable_encoder import jsonable_encoder
9
+ from ...core.pydantic_utilities import pydantic_v1
10
+ from ...core.request_options import RequestOptions
11
+ from ...types.hosted_by_enum import HostedByEnum
12
+ from ...types.ml_model_build_config_request import MlModelBuildConfigRequest
13
+ from ...types.ml_model_developer import MlModelDeveloper
14
+ from ...types.ml_model_display_config_request import MlModelDisplayConfigRequest
15
+ from ...types.ml_model_exec_config_request import MlModelExecConfigRequest
16
+ from ...types.ml_model_family import MlModelFamily
17
+ from ...types.ml_model_parameter_config_request import MlModelParameterConfigRequest
18
+ from ...types.ml_model_read import MlModelRead
19
+ from ...types.paginated_ml_model_read_list import PaginatedMlModelReadList
20
+ from ...types.visibility_enum import VisibilityEnum
21
+
22
+ # this is used as the default value for optional parameters
23
+ OMIT = typing.cast(typing.Any, ...)
24
+
25
+
26
class MlModelsClient:
    """Synchronous client for the ``v1/ml-models`` API endpoints.

    Wraps a :class:`SyncClientWrapper` and exposes list / create / retrieve /
    update / partial-update operations over ML Models. All methods raise
    :class:`ApiError` on non-2xx responses.
    """

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    def _parse_or_raise(self, response: typing.Any, response_type: typing.Any) -> typing.Any:
        """Deserialize a 2xx response body into ``response_type`` or raise ``ApiError``.

        Centralizes the response-handling pattern shared by every endpoint
        method: 2xx bodies are parsed with pydantic; non-2xx responses raise
        ``ApiError`` carrying the decoded JSON body when available, or the raw
        response text when the body is not valid JSON.
        """
        try:
            if 200 <= response.status_code < 300:
                return pydantic_v1.parse_obj_as(response_type, response.json())  # type: ignore
            response_json = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=response_json)

    def _modify(
        self,
        id: str,
        http_method: str,
        display_config: typing.Optional[MlModelDisplayConfigRequest],
        visibility: typing.Optional[VisibilityEnum],
        request_options: typing.Optional[RequestOptions],
    ) -> MlModelRead:
        """Shared implementation for ``update`` (PUT) and ``partial_update`` (PATCH).

        The two public methods send an identical payload to the same URL and
        differ only in the HTTP verb, so the request construction lives here.
        """
        _response = self._client_wrapper.httpx_client.request(
            f"v1/ml-models/{jsonable_encoder(id)}",
            base_url=self._client_wrapper.get_environment().default,
            method=http_method,
            json={"display_config": display_config, "visibility": visibility},
            request_options=request_options,
            omit=OMIT,
        )
        return self._parse_or_raise(_response, MlModelRead)

    def list(
        self,
        *,
        limit: typing.Optional[int] = None,
        offset: typing.Optional[int] = None,
        ordering: typing.Optional[str] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> PaginatedMlModelReadList:
        """
        List all ML Models that your Workspace has access to.

        Parameters
        ----------
        limit : typing.Optional[int]
            Number of results to return per page.

        offset : typing.Optional[int]
            The initial index from which to return the results.

        ordering : typing.Optional[str]
            Which field to use when ordering the results.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        PaginatedMlModelReadList

        Raises
        ------
        ApiError
            If the API responds with a non-2xx status code.

        Examples
        --------
        from vellum.client import Vellum

        client = Vellum(
            api_key="YOUR_API_KEY",
        )
        client.ml_models.list()
        """
        _response = self._client_wrapper.httpx_client.request(
            "v1/ml-models",
            base_url=self._client_wrapper.get_environment().default,
            method="GET",
            params={"limit": limit, "offset": offset, "ordering": ordering},
            request_options=request_options,
        )
        return self._parse_or_raise(_response, PaginatedMlModelReadList)

    def create(
        self,
        *,
        name: str,
        family: MlModelFamily,
        exec_config: MlModelExecConfigRequest,
        hosted_by: typing.Optional[HostedByEnum] = OMIT,
        developed_by: typing.Optional[MlModelDeveloper] = OMIT,
        build_config: typing.Optional[MlModelBuildConfigRequest] = OMIT,
        parameter_config: typing.Optional[MlModelParameterConfigRequest] = OMIT,
        display_config: typing.Optional[MlModelDisplayConfigRequest] = OMIT,
        visibility: typing.Optional[VisibilityEnum] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> MlModelRead:
        """
        Creates a new ML Model.

        Parameters
        ----------
        name : str
            The unique name of the ML Model.

        family : MlModelFamily
            The family of the ML Model (e.g. ``CLAUDE``, ``GPT3``, ``LLAMA3``);
            see ``MlModelFamily`` for the full set of accepted values.

        exec_config : MlModelExecConfigRequest
            Configuration for how to execute the ML Model.

        hosted_by : typing.Optional[HostedByEnum]
            The organization hosting the ML Model; see ``HostedByEnum`` for
            the accepted values.

        developed_by : typing.Optional[MlModelDeveloper]
            The organization that developed the ML Model; see
            ``MlModelDeveloper`` for the accepted values.

        build_config : typing.Optional[MlModelBuildConfigRequest]
            Configuration for how the ML Model was built.

        parameter_config : typing.Optional[MlModelParameterConfigRequest]
            Configuration for the ML Model's parameters.

        display_config : typing.Optional[MlModelDisplayConfigRequest]
            Configuration for how to display the ML Model.

        visibility : typing.Optional[VisibilityEnum]
            The visibility of the ML Model.

            * `DEFAULT` - DEFAULT
            * `PUBLIC` - PUBLIC
            * `PRIVATE` - PRIVATE
            * `DISABLED` - DISABLED

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        MlModelRead

        Raises
        ------
        ApiError
            If the API responds with a non-2xx status code.

        Examples
        --------
        from vellum import MlModelExecConfigRequest
        from vellum.client import Vellum

        client = Vellum(
            api_key="YOUR_API_KEY",
        )
        client.ml_models.create(
            name="name",
            family="CAPYBARA",
            exec_config=MlModelExecConfigRequest(
                model_identifier="model_identifier",
                base_url="base_url",
                metadata={"key": "value"},
                features=["TEXT"],
            ),
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "v1/ml-models",
            base_url=self._client_wrapper.get_environment().default,
            method="POST",
            json={
                "name": name,
                "family": family,
                "hosted_by": hosted_by,
                "developed_by": developed_by,
                "build_config": build_config,
                "exec_config": exec_config,
                "parameter_config": parameter_config,
                "display_config": display_config,
                "visibility": visibility,
            },
            request_options=request_options,
            omit=OMIT,
        )
        return self._parse_or_raise(_response, MlModelRead)

    def retrieve(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> MlModelRead:
        """
        Retrieve an ML Model by its UUID.

        Parameters
        ----------
        id : str
            A UUID string identifying this ml model.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        MlModelRead

        Raises
        ------
        ApiError
            If the API responds with a non-2xx status code.

        Examples
        --------
        from vellum.client import Vellum

        client = Vellum(
            api_key="YOUR_API_KEY",
        )
        client.ml_models.retrieve(
            id="id",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            f"v1/ml-models/{jsonable_encoder(id)}",
            base_url=self._client_wrapper.get_environment().default,
            method="GET",
            request_options=request_options,
        )
        return self._parse_or_raise(_response, MlModelRead)

    def update(
        self,
        id: str,
        *,
        display_config: typing.Optional[MlModelDisplayConfigRequest] = OMIT,
        visibility: typing.Optional[VisibilityEnum] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> MlModelRead:
        """
        Replace an ML Model with a new representation, keying off of its UUID.

        Parameters
        ----------
        id : str
            A UUID string identifying this ml model.

        display_config : typing.Optional[MlModelDisplayConfigRequest]
            Configuration for how to display the ML Model.

        visibility : typing.Optional[VisibilityEnum]
            The visibility of the ML Model.

            * `DEFAULT` - DEFAULT
            * `PUBLIC` - PUBLIC
            * `PRIVATE` - PRIVATE
            * `DISABLED` - DISABLED

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        MlModelRead

        Raises
        ------
        ApiError
            If the API responds with a non-2xx status code.

        Examples
        --------
        from vellum.client import Vellum

        client = Vellum(
            api_key="YOUR_API_KEY",
        )
        client.ml_models.update(
            id="id",
        )
        """
        return self._modify(id, "PUT", display_config, visibility, request_options)

    def partial_update(
        self,
        id: str,
        *,
        display_config: typing.Optional[MlModelDisplayConfigRequest] = OMIT,
        visibility: typing.Optional[VisibilityEnum] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> MlModelRead:
        """
        Partially update an ML Model, keying off of its UUID.

        Parameters
        ----------
        id : str
            A UUID string identifying this ml model.

        display_config : typing.Optional[MlModelDisplayConfigRequest]
            Configuration for how to display the ML Model.

        visibility : typing.Optional[VisibilityEnum]
            The visibility of the ML Model.

            * `DEFAULT` - DEFAULT
            * `PUBLIC` - PUBLIC
            * `PRIVATE` - PRIVATE
            * `DISABLED` - DISABLED

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        MlModelRead

        Raises
        ------
        ApiError
            If the API responds with a non-2xx status code.

        Examples
        --------
        from vellum.client import Vellum

        client = Vellum(
            api_key="YOUR_API_KEY",
        )
        client.ml_models.partial_update(
            id="id",
        )
        """
        return self._modify(id, "PATCH", display_config, visibility, request_options)
411
+
412
+
413
class AsyncMlModelsClient:
    """Asynchronous client for the ``v1/ml-models`` API endpoints.

    Mirrors :class:`MlModelsClient` but issues requests through an
    :class:`AsyncClientWrapper`; every endpoint method is a coroutine. All
    methods raise :class:`ApiError` on non-2xx responses.
    """

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    def _parse_or_raise(self, response: typing.Any, response_type: typing.Any) -> typing.Any:
        """Deserialize a 2xx response body into ``response_type`` or raise ``ApiError``.

        Parsing an already-received response is synchronous, so this helper is
        a plain method even inside the async client. Non-2xx responses raise
        ``ApiError`` with the decoded JSON body when available, or the raw
        response text when the body is not valid JSON.
        """
        try:
            if 200 <= response.status_code < 300:
                return pydantic_v1.parse_obj_as(response_type, response.json())  # type: ignore
            response_json = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=response_json)

    async def _modify(
        self,
        id: str,
        http_method: str,
        display_config: typing.Optional[MlModelDisplayConfigRequest],
        visibility: typing.Optional[VisibilityEnum],
        request_options: typing.Optional[RequestOptions],
    ) -> MlModelRead:
        """Shared implementation for ``update`` (PUT) and ``partial_update`` (PATCH).

        The two public methods send an identical payload to the same URL and
        differ only in the HTTP verb, so the request construction lives here.
        """
        _response = await self._client_wrapper.httpx_client.request(
            f"v1/ml-models/{jsonable_encoder(id)}",
            base_url=self._client_wrapper.get_environment().default,
            method=http_method,
            json={"display_config": display_config, "visibility": visibility},
            request_options=request_options,
            omit=OMIT,
        )
        return self._parse_or_raise(_response, MlModelRead)

    async def list(
        self,
        *,
        limit: typing.Optional[int] = None,
        offset: typing.Optional[int] = None,
        ordering: typing.Optional[str] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> PaginatedMlModelReadList:
        """
        List all ML Models that your Workspace has access to.

        Parameters
        ----------
        limit : typing.Optional[int]
            Number of results to return per page.

        offset : typing.Optional[int]
            The initial index from which to return the results.

        ordering : typing.Optional[str]
            Which field to use when ordering the results.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        PaginatedMlModelReadList

        Raises
        ------
        ApiError
            If the API responds with a non-2xx status code.

        Examples
        --------
        import asyncio

        from vellum.client import AsyncVellum

        client = AsyncVellum(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.ml_models.list()


        asyncio.run(main())
        """
        _response = await self._client_wrapper.httpx_client.request(
            "v1/ml-models",
            base_url=self._client_wrapper.get_environment().default,
            method="GET",
            params={"limit": limit, "offset": offset, "ordering": ordering},
            request_options=request_options,
        )
        return self._parse_or_raise(_response, PaginatedMlModelReadList)

    async def create(
        self,
        *,
        name: str,
        family: MlModelFamily,
        exec_config: MlModelExecConfigRequest,
        hosted_by: typing.Optional[HostedByEnum] = OMIT,
        developed_by: typing.Optional[MlModelDeveloper] = OMIT,
        build_config: typing.Optional[MlModelBuildConfigRequest] = OMIT,
        parameter_config: typing.Optional[MlModelParameterConfigRequest] = OMIT,
        display_config: typing.Optional[MlModelDisplayConfigRequest] = OMIT,
        visibility: typing.Optional[VisibilityEnum] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> MlModelRead:
        """
        Creates a new ML Model.

        Parameters
        ----------
        name : str
            The unique name of the ML Model.

        family : MlModelFamily
            The family of the ML Model (e.g. ``CLAUDE``, ``GPT3``, ``LLAMA3``);
            see ``MlModelFamily`` for the full set of accepted values.

        exec_config : MlModelExecConfigRequest
            Configuration for how to execute the ML Model.

        hosted_by : typing.Optional[HostedByEnum]
            The organization hosting the ML Model; see ``HostedByEnum`` for
            the accepted values.

        developed_by : typing.Optional[MlModelDeveloper]
            The organization that developed the ML Model; see
            ``MlModelDeveloper`` for the accepted values.

        build_config : typing.Optional[MlModelBuildConfigRequest]
            Configuration for how the ML Model was built.

        parameter_config : typing.Optional[MlModelParameterConfigRequest]
            Configuration for the ML Model's parameters.

        display_config : typing.Optional[MlModelDisplayConfigRequest]
            Configuration for how to display the ML Model.

        visibility : typing.Optional[VisibilityEnum]
            The visibility of the ML Model.

            * `DEFAULT` - DEFAULT
            * `PUBLIC` - PUBLIC
            * `PRIVATE` - PRIVATE
            * `DISABLED` - DISABLED

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        MlModelRead

        Raises
        ------
        ApiError
            If the API responds with a non-2xx status code.

        Examples
        --------
        import asyncio

        from vellum import MlModelExecConfigRequest
        from vellum.client import AsyncVellum

        client = AsyncVellum(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.ml_models.create(
                name="name",
                family="CAPYBARA",
                exec_config=MlModelExecConfigRequest(
                    model_identifier="model_identifier",
                    base_url="base_url",
                    metadata={"key": "value"},
                    features=["TEXT"],
                ),
            )


        asyncio.run(main())
        """
        _response = await self._client_wrapper.httpx_client.request(
            "v1/ml-models",
            base_url=self._client_wrapper.get_environment().default,
            method="POST",
            json={
                "name": name,
                "family": family,
                "hosted_by": hosted_by,
                "developed_by": developed_by,
                "build_config": build_config,
                "exec_config": exec_config,
                "parameter_config": parameter_config,
                "display_config": display_config,
                "visibility": visibility,
            },
            request_options=request_options,
            omit=OMIT,
        )
        return self._parse_or_raise(_response, MlModelRead)

    async def retrieve(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> MlModelRead:
        """
        Retrieve an ML Model by its UUID.

        Parameters
        ----------
        id : str
            A UUID string identifying this ml model.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        MlModelRead

        Raises
        ------
        ApiError
            If the API responds with a non-2xx status code.

        Examples
        --------
        import asyncio

        from vellum.client import AsyncVellum

        client = AsyncVellum(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.ml_models.retrieve(
                id="id",
            )


        asyncio.run(main())
        """
        _response = await self._client_wrapper.httpx_client.request(
            f"v1/ml-models/{jsonable_encoder(id)}",
            base_url=self._client_wrapper.get_environment().default,
            method="GET",
            request_options=request_options,
        )
        return self._parse_or_raise(_response, MlModelRead)

    async def update(
        self,
        id: str,
        *,
        display_config: typing.Optional[MlModelDisplayConfigRequest] = OMIT,
        visibility: typing.Optional[VisibilityEnum] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> MlModelRead:
        """
        Replace an ML Model with a new representation, keying off of its UUID.

        Parameters
        ----------
        id : str
            A UUID string identifying this ml model.

        display_config : typing.Optional[MlModelDisplayConfigRequest]
            Configuration for how to display the ML Model.

        visibility : typing.Optional[VisibilityEnum]
            The visibility of the ML Model.

            * `DEFAULT` - DEFAULT
            * `PUBLIC` - PUBLIC
            * `PRIVATE` - PRIVATE
            * `DISABLED` - DISABLED

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        MlModelRead

        Raises
        ------
        ApiError
            If the API responds with a non-2xx status code.

        Examples
        --------
        import asyncio

        from vellum.client import AsyncVellum

        client = AsyncVellum(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.ml_models.update(
                id="id",
            )


        asyncio.run(main())
        """
        return await self._modify(id, "PUT", display_config, visibility, request_options)

    async def partial_update(
        self,
        id: str,
        *,
        display_config: typing.Optional[MlModelDisplayConfigRequest] = OMIT,
        visibility: typing.Optional[VisibilityEnum] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> MlModelRead:
        """
        Partially update an ML Model, keying off of its UUID.

        Parameters
        ----------
        id : str
            A UUID string identifying this ml model.

        display_config : typing.Optional[MlModelDisplayConfigRequest]
            Configuration for how to display the ML Model.

        visibility : typing.Optional[VisibilityEnum]
            The visibility of the ML Model.

            * `DEFAULT` - DEFAULT
            * `PUBLIC` - PUBLIC
            * `PRIVATE` - PRIVATE
            * `DISABLED` - DISABLED

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        MlModelRead

        Raises
        ------
        ApiError
            If the API responds with a non-2xx status code.

        Examples
        --------
        import asyncio

        from vellum.client import AsyncVellum

        client = AsyncVellum(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.ml_models.partial_update(
                id="id",
            )


        asyncio.run(main())
        """
        return await self._modify(id, "PATCH", display_config, visibility, request_options)