vellum-ai 0.7.10__py3-none-any.whl → 0.8.4__py3-none-any.whl
- vellum/__init__.py +7 -474
- vellum/client.py +751 -246
- vellum/core/__init__.py +19 -3
- vellum/core/client_wrapper.py +6 -7
- vellum/core/file.py +7 -2
- vellum/core/http_client.py +4 -2
- vellum/core/jsonable_encoder.py +30 -31
- vellum/core/pydantic_utilities.py +190 -12
- vellum/core/query_encoder.py +38 -13
- vellum/core/serialization.py +170 -0
- vellum/errors/bad_request_error.py +2 -3
- vellum/errors/forbidden_error.py +2 -3
- vellum/errors/internal_server_error.py +2 -3
- vellum/errors/not_found_error.py +2 -3
- vellum/lib/test_suites/resources.py +4 -4
- vellum/resources/deployments/client.py +194 -46
- vellum/resources/document_indexes/client.py +174 -76
- vellum/resources/documents/client.py +162 -44
- vellum/resources/folder_entities/client.py +12 -8
- vellum/resources/ml_models/client.py +99 -33
- vellum/resources/sandboxes/client.py +70 -20
- vellum/resources/test_suite_runs/client.py +80 -34
- vellum/resources/test_suites/client.py +95 -37
- vellum/resources/workflow_deployments/client.py +89 -27
- vellum/resources/workflow_sandboxes/client.py +22 -10
- vellum/types/__init__.py +48 -595
- vellum/types/api_node_result.py +12 -21
- vellum/types/api_node_result_data.py +12 -24
- vellum/types/array_chat_message_content.py +12 -21
- vellum/types/array_chat_message_content_item.py +4 -79
- vellum/types/array_chat_message_content_item_request.py +4 -81
- vellum/types/array_chat_message_content_request.py +12 -21
- vellum/types/array_variable_value_item.py +12 -154
- vellum/types/array_vellum_value_item.py +7 -154
- vellum/types/array_vellum_value_item_request.py +12 -154
- vellum/types/basic_vectorizer_intfloat_multilingual_e_5_large.py +13 -22
- vellum/types/basic_vectorizer_intfloat_multilingual_e_5_large_request.py +13 -22
- vellum/types/basic_vectorizer_sentence_transformers_multi_qa_mpnet_base_cos_v_1.py +15 -22
- vellum/types/basic_vectorizer_sentence_transformers_multi_qa_mpnet_base_cos_v_1_request.py +15 -22
- vellum/types/basic_vectorizer_sentence_transformers_multi_qa_mpnet_base_dot_v_1.py +15 -22
- vellum/types/basic_vectorizer_sentence_transformers_multi_qa_mpnet_base_dot_v_1_request.py +15 -22
- vellum/types/chat_history_input_request.py +13 -22
- vellum/types/chat_message.py +13 -23
- vellum/types/chat_message_content.py +5 -103
- vellum/types/chat_message_content_request.py +8 -106
- vellum/types/chat_message_request.py +13 -23
- vellum/types/code_execution_node_array_result.py +12 -21
- vellum/types/code_execution_node_chat_history_result.py +12 -21
- vellum/types/code_execution_node_error_result.py +12 -21
- vellum/types/code_execution_node_function_call_result.py +12 -21
- vellum/types/code_execution_node_json_result.py +13 -22
- vellum/types/code_execution_node_number_result.py +12 -21
- vellum/types/code_execution_node_result.py +12 -21
- vellum/types/code_execution_node_result_data.py +12 -22
- vellum/types/code_execution_node_result_output.py +16 -212
- vellum/types/code_execution_node_search_results_result.py +12 -21
- vellum/types/code_execution_node_string_result.py +12 -21
- vellum/types/compile_prompt_deployment_expand_meta_request.py +14 -24
- vellum/types/compile_prompt_meta.py +11 -21
- vellum/types/components_schemas_pdf_search_result_meta_source.py +5 -0
- vellum/types/components_schemas_pdf_search_result_meta_source_request.py +5 -0
- vellum/types/conditional_node_result.py +12 -21
- vellum/types/conditional_node_result_data.py +11 -21
- vellum/types/create_test_suite_test_case_request.py +15 -25
- vellum/types/deployment_provider_payload_response.py +12 -22
- vellum/types/deployment_provider_payload_response_payload.py +1 -1
- vellum/types/deployment_read.py +18 -27
- vellum/types/deployment_release_tag_deployment_history_item.py +11 -20
- vellum/types/deployment_release_tag_read.py +16 -26
- vellum/types/document_document_to_document_index.py +14 -24
- vellum/types/document_index_chunking.py +4 -84
- vellum/types/document_index_chunking_request.py +4 -82
- vellum/types/document_index_indexing_config.py +12 -22
- vellum/types/document_index_indexing_config_request.py +12 -22
- vellum/types/document_index_read.py +16 -25
- vellum/types/document_read.py +19 -28
- vellum/types/enriched_normalized_completion.py +17 -27
- vellum/types/error_variable_value.py +12 -21
- vellum/types/error_vellum_value.py +12 -21
- vellum/types/error_vellum_value_request.py +12 -21
- vellum/types/execute_prompt_event.py +5 -118
- vellum/types/execute_prompt_response.py +3 -63
- vellum/types/execute_workflow_response.py +11 -21
- vellum/types/execute_workflow_workflow_result_event.py +3 -60
- vellum/types/execution_array_vellum_value.py +13 -22
- vellum/types/execution_chat_history_vellum_value.py +13 -22
- vellum/types/execution_error_vellum_value.py +13 -22
- vellum/types/execution_function_call_vellum_value.py +13 -22
- vellum/types/execution_json_vellum_value.py +14 -23
- vellum/types/execution_number_vellum_value.py +13 -22
- vellum/types/execution_search_results_vellum_value.py +13 -22
- vellum/types/execution_string_vellum_value.py +13 -22
- vellum/types/execution_vellum_value.py +16 -220
- vellum/types/external_test_case_execution.py +12 -22
- vellum/types/external_test_case_execution_request.py +12 -22
- vellum/types/fulfilled_execute_prompt_event.py +13 -22
- vellum/types/fulfilled_execute_prompt_response.py +14 -23
- vellum/types/fulfilled_execute_workflow_workflow_result_event.py +13 -21
- vellum/types/fulfilled_prompt_execution_meta.py +11 -21
- vellum/types/fulfilled_workflow_node_result_event.py +14 -22
- vellum/types/function_call.py +12 -22
- vellum/types/function_call_chat_message_content.py +12 -21
- vellum/types/function_call_chat_message_content_request.py +12 -21
- vellum/types/function_call_chat_message_content_value.py +12 -22
- vellum/types/function_call_chat_message_content_value_request.py +12 -22
- vellum/types/function_call_request.py +12 -22
- vellum/types/function_call_variable_value.py +12 -21
- vellum/types/function_call_vellum_value.py +12 -21
- vellum/types/function_call_vellum_value_request.py +12 -21
- vellum/types/generate_options_request.py +12 -22
- vellum/types/generate_request.py +14 -24
- vellum/types/generate_result.py +13 -23
- vellum/types/generate_result_data.py +12 -22
- vellum/types/generate_result_error.py +12 -22
- vellum/types/generate_stream_response.py +12 -22
- vellum/types/generate_stream_result.py +12 -22
- vellum/types/generate_stream_result_data.py +12 -22
- vellum/types/hkunlp_instructor_xl_vectorizer.py +12 -21
- vellum/types/hkunlp_instructor_xl_vectorizer_request.py +12 -21
- vellum/types/hugging_face_tokenizer_config.py +12 -21
- vellum/types/hugging_face_tokenizer_config_request.py +12 -21
- vellum/types/image_chat_message_content.py +12 -21
- vellum/types/image_chat_message_content_request.py +12 -21
- vellum/types/image_variable_value.py +12 -21
- vellum/types/image_vellum_value.py +12 -21
- vellum/types/image_vellum_value_request.py +12 -21
- vellum/types/indexing_config_vectorizer.py +18 -181
- vellum/types/indexing_config_vectorizer_request.py +18 -181
- vellum/types/initiated_execute_prompt_event.py +12 -21
- vellum/types/initiated_prompt_execution_meta.py +11 -21
- vellum/types/initiated_workflow_node_result_event.py +14 -22
- vellum/types/instructor_vectorizer_config.py +11 -21
- vellum/types/instructor_vectorizer_config_request.py +11 -21
- vellum/types/json_input_request.py +14 -23
- vellum/types/json_variable_value.py +13 -22
- vellum/types/json_vellum_value.py +13 -22
- vellum/types/json_vellum_value_request.py +13 -22
- vellum/types/map_node_result.py +12 -21
- vellum/types/map_node_result_data.py +11 -21
- vellum/types/merge_node_result.py +12 -21
- vellum/types/merge_node_result_data.py +12 -22
- vellum/types/metadata_filter_config_request.py +12 -22
- vellum/types/metadata_filter_rule_request.py +15 -25
- vellum/types/metric_node_result.py +12 -20
- vellum/types/ml_model_developer_enum_value_label.py +12 -22
- vellum/types/ml_model_display_config_labelled.py +12 -22
- vellum/types/ml_model_display_config_request.py +12 -22
- vellum/types/ml_model_display_tag_enum_value_label.py +12 -22
- vellum/types/ml_model_exec_config.py +14 -24
- vellum/types/ml_model_exec_config_request.py +14 -24
- vellum/types/ml_model_family_enum_value_label.py +12 -22
- vellum/types/ml_model_parameter_config.py +13 -23
- vellum/types/ml_model_parameter_config_request.py +13 -23
- vellum/types/ml_model_read.py +23 -33
- vellum/types/ml_model_request_authorization_config.py +12 -22
- vellum/types/ml_model_request_authorization_config_request.py +12 -22
- vellum/types/ml_model_request_config.py +11 -21
- vellum/types/ml_model_request_config_request.py +11 -21
- vellum/types/ml_model_response_config.py +11 -21
- vellum/types/ml_model_response_config_request.py +11 -21
- vellum/types/ml_model_tokenizer_config.py +3 -55
- vellum/types/ml_model_tokenizer_config_request.py +3 -57
- vellum/types/ml_model_usage.py +11 -21
- vellum/types/named_scenario_input_chat_history_variable_value_request.py +12 -21
- vellum/types/named_scenario_input_json_variable_value_request.py +13 -22
- vellum/types/named_scenario_input_request.py +6 -81
- vellum/types/named_scenario_input_string_variable_value_request.py +12 -21
- vellum/types/named_test_case_array_variable_value.py +12 -21
- vellum/types/named_test_case_array_variable_value_request.py +12 -21
- vellum/types/named_test_case_chat_history_variable_value.py +12 -21
- vellum/types/named_test_case_chat_history_variable_value_request.py +12 -21
- vellum/types/named_test_case_error_variable_value.py +12 -21
- vellum/types/named_test_case_error_variable_value_request.py +12 -21
- vellum/types/named_test_case_function_call_variable_value.py +12 -21
- vellum/types/named_test_case_function_call_variable_value_request.py +12 -21
- vellum/types/named_test_case_json_variable_value.py +13 -22
- vellum/types/named_test_case_json_variable_value_request.py +13 -22
- vellum/types/named_test_case_number_variable_value.py +12 -21
- vellum/types/named_test_case_number_variable_value_request.py +12 -21
- vellum/types/named_test_case_search_results_variable_value.py +12 -21
- vellum/types/named_test_case_search_results_variable_value_request.py +12 -21
- vellum/types/named_test_case_string_variable_value.py +12 -21
- vellum/types/named_test_case_string_variable_value_request.py +12 -21
- vellum/types/named_test_case_variable_value.py +16 -212
- vellum/types/named_test_case_variable_value_request.py +16 -212
- vellum/types/node_input_compiled_array_value.py +12 -21
- vellum/types/node_input_compiled_chat_history_value.py +12 -21
- vellum/types/node_input_compiled_error_value.py +12 -21
- vellum/types/node_input_compiled_function_call.py +12 -21
- vellum/types/node_input_compiled_json_value.py +13 -22
- vellum/types/node_input_compiled_number_value.py +12 -21
- vellum/types/node_input_compiled_search_results_value.py +12 -21
- vellum/types/node_input_compiled_string_value.py +12 -21
- vellum/types/node_input_variable_compiled_value.py +16 -220
- vellum/types/node_output_compiled_array_value.py +12 -21
- vellum/types/node_output_compiled_chat_history_value.py +12 -21
- vellum/types/node_output_compiled_error_value.py +12 -21
- vellum/types/node_output_compiled_function_call_value.py +12 -21
- vellum/types/node_output_compiled_json_value.py +13 -22
- vellum/types/node_output_compiled_number_value.py +12 -21
- vellum/types/node_output_compiled_search_results_value.py +12 -21
- vellum/types/node_output_compiled_string_value.py +12 -21
- vellum/types/node_output_compiled_value.py +16 -221
- vellum/types/normalized_log_probs.py +11 -21
- vellum/types/normalized_token_log_probs.py +11 -21
- vellum/types/number_variable_value.py +12 -21
- vellum/types/number_vellum_value.py +12 -21
- vellum/types/number_vellum_value_request.py +12 -21
- vellum/types/open_ai_vectorizer_config.py +11 -21
- vellum/types/open_ai_vectorizer_config_request.py +11 -21
- vellum/types/open_ai_vectorizer_text_embedding_3_large.py +13 -22
- vellum/types/open_ai_vectorizer_text_embedding_3_large_request.py +13 -22
- vellum/types/open_ai_vectorizer_text_embedding_3_small.py +13 -22
- vellum/types/open_ai_vectorizer_text_embedding_3_small_request.py +13 -22
- vellum/types/open_ai_vectorizer_text_embedding_ada_002.py +13 -22
- vellum/types/open_ai_vectorizer_text_embedding_ada_002_request.py +13 -22
- vellum/types/open_api_array_property.py +18 -27
- vellum/types/open_api_array_property_request.py +18 -27
- vellum/types/open_api_boolean_property.py +12 -21
- vellum/types/open_api_boolean_property_request.py +12 -21
- vellum/types/open_api_const_property.py +12 -21
- vellum/types/open_api_const_property_request.py +12 -21
- vellum/types/open_api_integer_property.py +12 -21
- vellum/types/open_api_integer_property_request.py +12 -21
- vellum/types/open_api_number_property.py +12 -21
- vellum/types/open_api_number_property_request.py +12 -21
- vellum/types/open_api_object_property.py +19 -28
- vellum/types/open_api_object_property_request.py +19 -28
- vellum/types/open_api_one_of_property.py +15 -26
- vellum/types/open_api_one_of_property_request.py +15 -26
- vellum/types/open_api_property.py +20 -270
- vellum/types/open_api_property_request.py +20 -270
- vellum/types/open_api_ref_property.py +12 -21
- vellum/types/open_api_ref_property_request.py +12 -21
- vellum/types/open_api_string_property.py +12 -21
- vellum/types/open_api_string_property_request.py +12 -21
- vellum/types/paginated_document_index_read_list.py +11 -21
- vellum/types/paginated_ml_model_read_list.py +11 -21
- vellum/types/paginated_slim_deployment_read_list.py +11 -21
- vellum/types/paginated_slim_document_list.py +11 -21
- vellum/types/paginated_slim_workflow_deployment_list.py +11 -21
- vellum/types/paginated_test_suite_run_execution_list.py +11 -21
- vellum/types/paginated_test_suite_test_case_list.py +11 -21
- vellum/types/pdf_search_result_meta_source.py +14 -23
- vellum/types/pdf_search_result_meta_source_request.py +14 -23
- vellum/types/prompt_deployment_expand_meta_request_request.py +17 -27
- vellum/types/prompt_deployment_input_request.py +4 -83
- vellum/types/prompt_execution_meta.py +11 -21
- vellum/types/prompt_node_execution_meta.py +11 -21
- vellum/types/prompt_node_result.py +12 -21
- vellum/types/prompt_node_result_data.py +11 -21
- vellum/types/prompt_output.py +5 -102
- vellum/types/raw_prompt_execution_overrides_request.py +14 -24
- vellum/types/reducto_chunker_config.py +11 -21
- vellum/types/reducto_chunker_config_request.py +11 -21
- vellum/types/reducto_chunking.py +12 -21
- vellum/types/reducto_chunking_request.py +12 -21
- vellum/types/rejected_execute_prompt_event.py +13 -22
- vellum/types/rejected_execute_prompt_response.py +14 -23
- vellum/types/rejected_execute_workflow_workflow_result_event.py +13 -21
- vellum/types/rejected_prompt_execution_meta.py +11 -21
- vellum/types/rejected_workflow_node_result_event.py +14 -22
- vellum/types/replace_test_suite_test_case_request.py +16 -26
- vellum/types/sandbox_scenario.py +13 -23
- vellum/types/scenario_input.py +6 -81
- vellum/types/scenario_input_chat_history_variable_value.py +12 -21
- vellum/types/scenario_input_json_variable_value.py +13 -22
- vellum/types/scenario_input_string_variable_value.py +12 -21
- vellum/types/search_filters_request.py +13 -23
- vellum/types/search_node_result.py +12 -21
- vellum/types/search_node_result_data.py +12 -22
- vellum/types/search_request_options_request.py +17 -27
- vellum/types/search_response.py +12 -22
- vellum/types/search_result.py +15 -25
- vellum/types/search_result_document.py +15 -25
- vellum/types/search_result_document_request.py +15 -25
- vellum/types/search_result_merging_request.py +12 -22
- vellum/types/search_result_meta.py +13 -23
- vellum/types/search_result_meta_request.py +13 -23
- vellum/types/search_result_request.py +15 -25
- vellum/types/search_weights_request.py +13 -23
- vellum/types/sentence_chunker_config.py +11 -21
- vellum/types/sentence_chunker_config_request.py +11 -21
- vellum/types/sentence_chunking.py +12 -21
- vellum/types/sentence_chunking_request.py +12 -21
- vellum/types/slim_deployment_read.py +16 -25
- vellum/types/slim_document.py +24 -33
- vellum/types/slim_workflow_deployment.py +19 -28
- vellum/types/streaming_execute_prompt_event.py +13 -22
- vellum/types/streaming_prompt_execution_meta.py +11 -21
- vellum/types/streaming_workflow_node_result_event.py +14 -22
- vellum/types/string_chat_message_content.py +12 -21
- vellum/types/string_chat_message_content_request.py +12 -21
- vellum/types/string_input_request.py +13 -22
- vellum/types/string_variable_value.py +12 -21
- vellum/types/string_vellum_value.py +12 -21
- vellum/types/string_vellum_value_request.py +12 -21
- vellum/types/submit_completion_actual_request.py +18 -27
- vellum/types/submit_workflow_execution_actual_request.py +4 -95
- vellum/types/subworkflow_node_result.py +12 -21
- vellum/types/subworkflow_node_result_data.py +11 -21
- vellum/types/templating_node_array_result.py +12 -21
- vellum/types/templating_node_chat_history_result.py +12 -21
- vellum/types/templating_node_error_result.py +12 -21
- vellum/types/templating_node_function_call_result.py +12 -21
- vellum/types/templating_node_json_result.py +13 -22
- vellum/types/templating_node_number_result.py +12 -21
- vellum/types/templating_node_result.py +12 -21
- vellum/types/templating_node_result_data.py +12 -22
- vellum/types/templating_node_result_output.py +16 -212
- vellum/types/templating_node_search_results_result.py +12 -21
- vellum/types/templating_node_string_result.py +12 -21
- vellum/types/terminal_node_array_result.py +13 -22
- vellum/types/terminal_node_chat_history_result.py +13 -22
- vellum/types/terminal_node_error_result.py +13 -22
- vellum/types/terminal_node_function_call_result.py +13 -22
- vellum/types/terminal_node_json_result.py +14 -23
- vellum/types/terminal_node_number_result.py +13 -22
- vellum/types/terminal_node_result.py +12 -21
- vellum/types/terminal_node_result_data.py +12 -22
- vellum/types/terminal_node_result_output.py +16 -220
- vellum/types/terminal_node_search_results_result.py +13 -22
- vellum/types/terminal_node_string_result.py +13 -22
- vellum/types/test_case_array_variable_value.py +12 -21
- vellum/types/test_case_chat_history_variable_value.py +12 -21
- vellum/types/test_case_error_variable_value.py +12 -21
- vellum/types/test_case_function_call_variable_value.py +12 -21
- vellum/types/test_case_json_variable_value.py +13 -22
- vellum/types/test_case_number_variable_value.py +12 -21
- vellum/types/test_case_search_results_variable_value.py +12 -21
- vellum/types/test_case_string_variable_value.py +12 -21
- vellum/types/test_case_variable_value.py +16 -220
- vellum/types/test_suite_run_deployment_release_tag_exec_config.py +13 -22
- vellum/types/test_suite_run_deployment_release_tag_exec_config_data.py +13 -23
- vellum/types/test_suite_run_deployment_release_tag_exec_config_data_request.py +13 -23
- vellum/types/test_suite_run_deployment_release_tag_exec_config_request.py +13 -22
- vellum/types/test_suite_run_exec_config.py +4 -85
- vellum/types/test_suite_run_exec_config_request.py +6 -89
- vellum/types/test_suite_run_execution.py +12 -22
- vellum/types/test_suite_run_execution_array_output.py +12 -21
- vellum/types/test_suite_run_execution_chat_history_output.py +12 -21
- vellum/types/test_suite_run_execution_error_output.py +12 -21
- vellum/types/test_suite_run_execution_function_call_output.py +12 -21
- vellum/types/test_suite_run_execution_json_output.py +13 -22
- vellum/types/test_suite_run_execution_metric_definition.py +11 -21
- vellum/types/test_suite_run_execution_metric_result.py +12 -22
- vellum/types/test_suite_run_execution_number_output.py +12 -21
- vellum/types/test_suite_run_execution_output.py +16 -220
- vellum/types/test_suite_run_execution_search_results_output.py +12 -21
- vellum/types/test_suite_run_execution_string_output.py +12 -21
- vellum/types/test_suite_run_external_exec_config.py +14 -22
- vellum/types/test_suite_run_external_exec_config_data.py +12 -22
- vellum/types/test_suite_run_external_exec_config_data_request.py +12 -22
- vellum/types/test_suite_run_external_exec_config_request.py +14 -22
- vellum/types/test_suite_run_metric_error_output.py +13 -22
- vellum/types/test_suite_run_metric_number_output.py +12 -21
- vellum/types/test_suite_run_metric_output.py +4 -81
- vellum/types/test_suite_run_metric_string_output.py +12 -21
- vellum/types/test_suite_run_read.py +15 -24
- vellum/types/test_suite_run_test_suite.py +11 -21
- vellum/types/test_suite_run_workflow_release_tag_exec_config.py +13 -22
- vellum/types/test_suite_run_workflow_release_tag_exec_config_data.py +13 -23
- vellum/types/test_suite_run_workflow_release_tag_exec_config_data_request.py +13 -23
- vellum/types/test_suite_run_workflow_release_tag_exec_config_request.py +13 -22
- vellum/types/test_suite_test_case.py +11 -21
- vellum/types/test_suite_test_case_bulk_operation_request.py +8 -111
- vellum/types/test_suite_test_case_bulk_result.py +8 -110
- vellum/types/test_suite_test_case_create_bulk_operation_request.py +13 -22
- vellum/types/test_suite_test_case_created_bulk_result.py +12 -21
- vellum/types/test_suite_test_case_created_bulk_result_data.py +11 -21
- vellum/types/test_suite_test_case_delete_bulk_operation_data_request.py +11 -21
- vellum/types/test_suite_test_case_delete_bulk_operation_request.py +14 -23
- vellum/types/test_suite_test_case_deleted_bulk_result.py +13 -22
- vellum/types/test_suite_test_case_deleted_bulk_result_data.py +11 -21
- vellum/types/test_suite_test_case_rejected_bulk_result.py +14 -23
- vellum/types/test_suite_test_case_replace_bulk_operation_request.py +13 -22
- vellum/types/test_suite_test_case_replaced_bulk_result.py +13 -22
- vellum/types/test_suite_test_case_replaced_bulk_result_data.py +11 -21
- vellum/types/test_suite_test_case_upsert_bulk_operation_request.py +13 -22
- vellum/types/tik_token_tokenizer_config.py +12 -21
- vellum/types/tik_token_tokenizer_config_request.py +12 -21
- vellum/types/token_overlapping_window_chunker_config.py +11 -21
- vellum/types/token_overlapping_window_chunker_config_request.py +11 -21
- vellum/types/token_overlapping_window_chunking.py +12 -21
- vellum/types/token_overlapping_window_chunking_request.py +12 -21
- vellum/types/upload_document_response.py +12 -22
- vellum/types/upsert_test_suite_test_case_request.py +16 -26
- vellum/types/vellum_error.py +12 -22
- vellum/types/vellum_error_request.py +12 -22
- vellum/types/vellum_image.py +12 -22
- vellum/types/vellum_image_request.py +12 -22
- vellum/types/vellum_variable.py +12 -22
- vellum/types/workflow_deployment_read.py +20 -29
- vellum/types/workflow_event_error.py +12 -22
- vellum/types/workflow_execution_actual_chat_history_request.py +18 -27
- vellum/types/workflow_execution_actual_json_request.py +18 -27
- vellum/types/workflow_execution_actual_string_request.py +18 -27
- vellum/types/workflow_execution_node_result_event.py +12 -21
- vellum/types/workflow_execution_workflow_result_event.py +12 -21
- vellum/types/workflow_expand_meta_request.py +12 -22
- vellum/types/workflow_node_result_data.py +22 -280
- vellum/types/workflow_node_result_event.py +8 -133
- vellum/types/workflow_output.py +18 -247
- vellum/types/workflow_output_array.py +13 -22
- vellum/types/workflow_output_chat_history.py +13 -22
- vellum/types/workflow_output_error.py +13 -22
- vellum/types/workflow_output_function_call.py +13 -22
- vellum/types/workflow_output_image.py +13 -22
- vellum/types/workflow_output_json.py +14 -23
- vellum/types/workflow_output_number.py +13 -22
- vellum/types/workflow_output_search_results.py +13 -22
- vellum/types/workflow_output_string.py +13 -22
- vellum/types/workflow_release_tag_read.py +15 -25
- vellum/types/workflow_release_tag_workflow_deployment_history_item.py +13 -22
- vellum/types/workflow_request_chat_history_input_request.py +13 -22
- vellum/types/workflow_request_input_request.py +8 -108
- vellum/types/workflow_request_json_input_request.py +14 -23
- vellum/types/workflow_request_number_input_request.py +13 -22
- vellum/types/workflow_request_string_input_request.py +13 -22
- vellum/types/workflow_result_event.py +14 -23
- vellum/types/workflow_result_event_output_data.py +16 -245
- vellum/types/workflow_result_event_output_data_array.py +14 -23
- vellum/types/workflow_result_event_output_data_chat_history.py +14 -23
- vellum/types/workflow_result_event_output_data_error.py +14 -23
- vellum/types/workflow_result_event_output_data_function_call.py +14 -23
- vellum/types/workflow_result_event_output_data_json.py +14 -23
- vellum/types/workflow_result_event_output_data_number.py +13 -22
- vellum/types/workflow_result_event_output_data_search_results.py +14 -23
- vellum/types/workflow_result_event_output_data_string.py +14 -23
- vellum/types/workflow_stream_event.py +3 -62
- vellum/version.py +0 -1
- {vellum_ai-0.7.10.dist-info → vellum_ai-0.8.4.dist-info}/METADATA +5 -4
- vellum_ai-0.8.4.dist-info/RECORD +507 -0
- vellum/types/search_result_meta_source.py +0 -36
- vellum/types/search_result_meta_source_request.py +0 -36
- vellum_ai-0.7.10.dist-info/RECORD +0 -506
- {vellum_ai-0.7.10.dist-info → vellum_ai-0.8.4.dist-info}/LICENSE +0 -0
- {vellum_ai-0.7.10.dist-info → vellum_ai-0.8.4.dist-info}/WHEEL +0 -0
vellum/client.py
CHANGED
@@ -1,48 +1,57 @@
 # This file was auto-generated by Fern from our API Definition.
 
-import json
 import typing
-from
-
+from .environment import VellumEnvironment
 import httpx
-
-from .
-from .
-from .
+from .core.client_wrapper import SyncClientWrapper
+from .resources.deployments.client import DeploymentsClient
+from .resources.document_indexes.client import DocumentIndexesClient
+from .resources.documents.client import DocumentsClient
+from .resources.folder_entities.client import FolderEntitiesClient
+from .resources.ml_models.client import MlModelsClient
+from .resources.sandboxes.client import SandboxesClient
+from .resources.test_suite_runs.client import TestSuiteRunsClient
+from .resources.test_suites.client import TestSuitesClient
+from .resources.workflow_deployments.client import WorkflowDeploymentsClient
+from .resources.workflow_sandboxes.client import WorkflowSandboxesClient
+from .types.prompt_deployment_input_request import PromptDeploymentInputRequest
+from .types.prompt_deployment_expand_meta_request_request import PromptDeploymentExpandMetaRequestRequest
+from .types.raw_prompt_execution_overrides_request import RawPromptExecutionOverridesRequest
 from .core.request_options import RequestOptions
-from .
+from .types.execute_prompt_response import ExecutePromptResponse
+from .core.pydantic_utilities import parse_obj_as
 from .errors.bad_request_error import BadRequestError
 from .errors.forbidden_error import ForbiddenError
-from .errors.internal_server_error import InternalServerError
 from .errors.not_found_error import NotFoundError
-from .
-from .
-from .
-from .resources.folder_entities.client import AsyncFolderEntitiesClient, FolderEntitiesClient
-from .resources.ml_models.client import AsyncMlModelsClient, MlModelsClient
-from .resources.sandboxes.client import AsyncSandboxesClient, SandboxesClient
-from .resources.test_suite_runs.client import AsyncTestSuiteRunsClient, TestSuiteRunsClient
-from .resources.test_suites.client import AsyncTestSuitesClient, TestSuitesClient
-from .resources.workflow_deployments.client import AsyncWorkflowDeploymentsClient, WorkflowDeploymentsClient
-from .resources.workflow_sandboxes.client import AsyncWorkflowSandboxesClient, WorkflowSandboxesClient
+from .errors.internal_server_error import InternalServerError
+from json.decoder import JSONDecodeError
+from .core.api_error import ApiError
 from .types.execute_prompt_event import ExecutePromptEvent
-
+import json
+from .types.workflow_request_input_request import WorkflowRequestInputRequest
+from .types.workflow_expand_meta_request import WorkflowExpandMetaRequest
 from .types.execute_workflow_response import ExecuteWorkflowResponse
-from .types.
+from .types.workflow_execution_event_type import WorkflowExecutionEventType
+from .types.workflow_stream_event import WorkflowStreamEvent
 from .types.generate_request import GenerateRequest
+from .types.generate_options_request import GenerateOptionsRequest
 from .types.generate_response import GenerateResponse
 from .types.generate_stream_response import GenerateStreamResponse
-from .types.prompt_deployment_expand_meta_request_request import PromptDeploymentExpandMetaRequestRequest
-from .types.prompt_deployment_input_request import PromptDeploymentInputRequest
-from .types.raw_prompt_execution_overrides_request import RawPromptExecutionOverridesRequest
 from .types.search_request_options_request import SearchRequestOptionsRequest
 from .types.search_response import SearchResponse
 from .types.submit_completion_actual_request import SubmitCompletionActualRequest
 from .types.submit_workflow_execution_actual_request import SubmitWorkflowExecutionActualRequest
-from .
-from .
-from .
-from .
+from .core.client_wrapper import AsyncClientWrapper
+from .resources.deployments.client import AsyncDeploymentsClient
+from .resources.document_indexes.client import AsyncDocumentIndexesClient
+from .resources.documents.client import AsyncDocumentsClient
+from .resources.folder_entities.client import AsyncFolderEntitiesClient
+from .resources.ml_models.client import AsyncMlModelsClient
+from .resources.sandboxes.client import AsyncSandboxesClient
+from .resources.test_suite_runs.client import AsyncTestSuiteRunsClient
+from .resources.test_suites.client import AsyncTestSuitesClient
+from .resources.workflow_deployments.client import AsyncWorkflowDeploymentsClient
+from .resources.workflow_sandboxes.client import AsyncWorkflowSandboxesClient
 
 # this is used as the default value for optional parameters
 OMIT = typing.cast(typing.Any, ...)
@@ -75,7 +84,7 @@ class Vellum:
 
 Examples
 --------
-from vellum
+from vellum import Vellum
 
 client = Vellum(
 api_key="YOUR_API_KEY",
@@ -89,7 +98,7 @@ class Vellum:
 api_key: str,
 timeout: typing.Optional[float] = None,
 follow_redirects: typing.Optional[bool] = True,
-httpx_client: typing.Optional[httpx.Client] = None
+httpx_client: typing.Optional[httpx.Client] = None,
 ):
 _defaulted_timeout = timeout if timeout is not None else None if httpx_client is None else None
 self._client_wrapper = SyncClientWrapper(
@@ -124,8 +133,8 @@ class Vellum:
 expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest] = OMIT,
 raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest] = OMIT,
 expand_raw: typing.Optional[typing.Sequence[str]] = OMIT,
-metadata: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
-request_options: typing.Optional[RequestOptions] = None
+metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+request_options: typing.Optional[RequestOptions] = None,
 ) -> ExecutePromptResponse:
 """
 Executes a deployed Prompt and returns the result.
@@ -156,7 +165,7 @@ class Vellum:
 expand_raw : typing.Optional[typing.Sequence[str]]
 A list of keys whose values you'd like to directly return from the JSON response of the model provider. Useful if you need lower-level info returned by model providers that Vellum would otherwise omit. Corresponding key/value pairs will be returned under the `raw` key of the API response.
 
-metadata : typing.Optional[typing.Dict[str, typing.Any]]
+metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
 Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.
 
 request_options : typing.Optional[RequestOptions]
@@ -169,42 +178,18 @@ class Vellum:
 
 Examples
 --------
-from vellum import
-PromptDeploymentExpandMetaRequestRequest,
-PromptDeploymentInputRequest_String,
-RawPromptExecutionOverridesRequest,
-)
-from vellum.client import Vellum
+from vellum import StringInputRequest, Vellum
 
 client = Vellum(
 api_key="YOUR_API_KEY",
 )
 client.execute_prompt(
 inputs=[
-
-name="
-value="
+StringInputRequest(
+name="name",
+value="value",
 )
 ],
-prompt_deployment_id="string",
-prompt_deployment_name="string",
-release_tag="string",
-external_id="string",
-expand_meta=PromptDeploymentExpandMetaRequestRequest(
-model_name=True,
-usage=True,
-finish_reason=True,
-latency=True,
-deployment_release_tag=True,
-prompt_version_id=True,
-),
-raw_overrides=RawPromptExecutionOverridesRequest(
-body={"string": {"key": "value"}},
-headers={"string": {"key": "value"}},
-url="string",
-),
-expand_raw=["string"],
-metadata={"string": {"key": "value"}},
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -227,15 +212,53 @@ class Vellum:
 )
 try:
 if 200 <= _response.status_code < 300:
-return
+return typing.cast(
+ExecutePromptResponse,
+parse_obj_as(
+type_=ExecutePromptResponse, # type: ignore
+object_=_response.json(),
+),
+)
 if _response.status_code == 400:
-raise BadRequestError(
+raise BadRequestError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 if _response.status_code == 403:
-raise ForbiddenError(
+raise ForbiddenError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 if _response.status_code == 404:
-raise NotFoundError(
+raise NotFoundError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 if _response.status_code == 500:
-raise InternalServerError(
+raise InternalServerError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 _response_json = _response.json()
 except JSONDecodeError:
 raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -252,8 +275,8 @@ class Vellum:
 expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest] = OMIT,
 raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest] = OMIT,
 expand_raw: typing.Optional[typing.Sequence[str]] = OMIT,
-metadata: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
-request_options: typing.Optional[RequestOptions] = None
+metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+request_options: typing.Optional[RequestOptions] = None,
 ) -> typing.Iterator[ExecutePromptEvent]:
 """
 Executes a deployed Prompt and streams back the results.
@@ -284,7 +307,7 @@ class Vellum:
 expand_raw : typing.Optional[typing.Sequence[str]]
 A list of keys whose values you'd like to directly return from the JSON response of the model provider. Useful if you need lower-level info returned by model providers that Vellum would otherwise omit. Corresponding key/value pairs will be returned under the `raw` key of the API response.
 
-metadata : typing.Optional[typing.Dict[str, typing.Any]]
+metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
 Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.
 
 request_options : typing.Optional[RequestOptions]
@@ -299,17 +322,17 @@ class Vellum:
 --------
 from vellum import (
 PromptDeploymentExpandMetaRequestRequest,
-PromptDeploymentInputRequest_String,
 RawPromptExecutionOverridesRequest,
+StringInputRequest,
+Vellum,
 )
-from vellum.client import Vellum
 
 client = Vellum(
 api_key="YOUR_API_KEY",
 )
 response = client.execute_prompt_stream(
 inputs=[
-
+StringInputRequest(
 name="string",
 value="string",
 )
@@ -361,19 +384,57 @@ class Vellum:
 try:
 if len(_text) == 0:
 continue
-yield
+yield typing.cast(
+ExecutePromptEvent,
+parse_obj_as(
+type_=ExecutePromptEvent, # type: ignore
+object_=json.loads(_text),
+),
+)
 except:
 pass
 return
 _response.read()
 if _response.status_code == 400:
-raise BadRequestError(
+raise BadRequestError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 if _response.status_code == 403:
-raise ForbiddenError(
+raise ForbiddenError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 if _response.status_code == 404:
-raise NotFoundError(
+raise NotFoundError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 if _response.status_code == 500:
-raise InternalServerError(
+raise InternalServerError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 _response_json = _response.json()
 except JSONDecodeError:
 raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -388,7 +449,7 @@ class Vellum:
 workflow_deployment_name: typing.Optional[str] = OMIT,
 release_tag: typing.Optional[str] = OMIT,
 external_id: typing.Optional[str] = OMIT,
-request_options: typing.Optional[RequestOptions] = None
+request_options: typing.Optional[RequestOptions] = None,
 ) -> ExecuteWorkflowResponse:
 """
 Executes a deployed Workflow and returns its outputs.
@@ -423,26 +484,18 @@ class Vellum:
 
 Examples
 --------
-from vellum import
-from vellum.client import Vellum
+from vellum import Vellum, WorkflowRequestStringInputRequest
 
 client = Vellum(
 api_key="YOUR_API_KEY",
 )
 client.execute_workflow(
 inputs=[
-
-name="
-value="
+WorkflowRequestStringInputRequest(
+name="name",
+value="value",
 )
 ],
-expand_meta=WorkflowExpandMetaRequest(
-usage=True,
-),
-workflow_deployment_id="string",
-workflow_deployment_name="string",
-release_tag="string",
-external_id="string",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -462,13 +515,43 @@ class Vellum:
 )
 try:
 if 200 <= _response.status_code < 300:
-return
+return typing.cast(
+ExecuteWorkflowResponse,
+parse_obj_as(
+type_=ExecuteWorkflowResponse, # type: ignore
+object_=_response.json(),
+),
+)
 if _response.status_code == 400:
-raise BadRequestError(
+raise BadRequestError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 if _response.status_code == 404:
-raise NotFoundError(
+raise NotFoundError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 if _response.status_code == 500:
-raise InternalServerError(
+raise InternalServerError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 _response_json = _response.json()
 except JSONDecodeError:
 raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -484,7 +567,7 @@ class Vellum:
 release_tag: typing.Optional[str] = OMIT,
 external_id: typing.Optional[str] = OMIT,
 event_types: typing.Optional[typing.Sequence[WorkflowExecutionEventType]] = OMIT,
-request_options: typing.Optional[RequestOptions] = None
+request_options: typing.Optional[RequestOptions] = None,
 ) -> typing.Iterator[WorkflowStreamEvent]:
 """
 Executes a deployed Workflow and streams back its results.
@@ -522,15 +605,18 @@ class Vellum:
 
 Examples
 --------
-from vellum import
-
+from vellum import (
+Vellum,
+WorkflowExpandMetaRequest,
+WorkflowRequestStringInputRequest,
+)
 
 client = Vellum(
 api_key="YOUR_API_KEY",
 )
 response = client.execute_workflow_stream(
 inputs=[
-
+WorkflowRequestStringInputRequest(
 name="string",
 value="string",
 )
@@ -569,17 +655,47 @@ class Vellum:
 try:
 if len(_text) == 0:
 continue
-yield
+yield typing.cast(
+WorkflowStreamEvent,
+parse_obj_as(
+type_=WorkflowStreamEvent, # type: ignore
+object_=json.loads(_text),
+),
+)
 except:
 pass
 return
 _response.read()
 if _response.status_code == 400:
-raise BadRequestError(
+raise BadRequestError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 if _response.status_code == 404:
-raise NotFoundError(
+raise NotFoundError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 if _response.status_code == 500:
-raise InternalServerError(
+raise InternalServerError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 _response_json = _response.json()
 except JSONDecodeError:
 raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -592,7 +708,7 @@ class Vellum:
 deployment_id: typing.Optional[str] = OMIT,
 deployment_name: typing.Optional[str] = OMIT,
 options: typing.Optional[GenerateOptionsRequest] = OMIT,
-request_options: typing.Optional[RequestOptions] = None
+request_options: typing.Optional[RequestOptions] = None,
 ) -> GenerateResponse:
 """
 Generate a completion using a previously defined deployment.
@@ -624,8 +740,7 @@ class Vellum:
 
 Examples
 --------
-from vellum import GenerateRequest
-from vellum.client import Vellum
+from vellum import GenerateRequest, Vellum
 
 client = Vellum(
 api_key="YOUR_API_KEY",
@@ -653,15 +768,53 @@ class Vellum:
 )
 try:
 if 200 <= _response.status_code < 300:
-return
+return typing.cast(
+GenerateResponse,
+parse_obj_as(
+type_=GenerateResponse, # type: ignore
+object_=_response.json(),
+),
+)
 if _response.status_code == 400:
-raise BadRequestError(
+raise BadRequestError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 if _response.status_code == 403:
-raise ForbiddenError(
+raise ForbiddenError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 if _response.status_code == 404:
-raise NotFoundError(
+raise NotFoundError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 if _response.status_code == 500:
-raise InternalServerError(
+raise InternalServerError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 _response_json = _response.json()
 except JSONDecodeError:
 raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -674,7 +827,7 @@ class Vellum:
 deployment_id: typing.Optional[str] = OMIT,
 deployment_name: typing.Optional[str] = OMIT,
 options: typing.Optional[GenerateOptionsRequest] = OMIT,
-request_options: typing.Optional[RequestOptions] = None
+request_options: typing.Optional[RequestOptions] = None,
 ) -> typing.Iterator[GenerateStreamResponse]:
 """
 Generate a stream of completions using a previously defined deployment.
@@ -707,12 +860,12 @@ class Vellum:
 Examples
 --------
 from vellum import (
-ChatMessageContentRequest_String,
 ChatMessageRequest,
 GenerateOptionsRequest,
 GenerateRequest,
+StringChatMessageContentRequest,
+Vellum,
 )
-from vellum.client import Vellum
 
 client = Vellum(
 api_key="YOUR_API_KEY",
@@ -727,7 +880,7 @@ class Vellum:
 ChatMessageRequest(
 text="string",
 role="SYSTEM",
-content=
+content=StringChatMessageContentRequest(),
 source="string",
 )
 ],
@@ -760,19 +913,57 @@ class Vellum:
 try:
 if len(_text) == 0:
 continue
-yield
+yield typing.cast(
+GenerateStreamResponse,
+parse_obj_as(
+type_=GenerateStreamResponse, # type: ignore
+object_=json.loads(_text),
+),
+)
 except:
 pass
 return
 _response.read()
 if _response.status_code == 400:
-raise BadRequestError(
+raise BadRequestError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 if _response.status_code == 403:
-raise ForbiddenError(
+raise ForbiddenError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 if _response.status_code == 404:
-raise NotFoundError(
+raise NotFoundError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 if _response.status_code == 500:
-raise InternalServerError(
+raise InternalServerError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 _response_json = _response.json()
 except JSONDecodeError:
 raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -785,7 +976,7 @@ class Vellum:
 index_id: typing.Optional[str] = OMIT,
 index_name: typing.Optional[str] = OMIT,
 options: typing.Optional[SearchRequestOptionsRequest] = OMIT,
-request_options: typing.Optional[RequestOptions] = None
+request_options: typing.Optional[RequestOptions] = None,
 ) -> SearchResponse:
 """
 Perform a search against a document index.
@@ -814,7 +1005,7 @@ class Vellum:
 
 Examples
 --------
-from vellum
+from vellum import Vellum
 
 client = Vellum(
 api_key="YOUR_API_KEY",
@@ -827,19 +1018,54 @@ class Vellum:
 "v1/search",
 base_url=self._client_wrapper.get_environment().predict,
 method="POST",
-json={
+json={
+"index_id": index_id,
+"index_name": index_name,
+"query": query,
+"options": options,
+},
 request_options=request_options,
 omit=OMIT,
 )
 try:
 if 200 <= _response.status_code < 300:
-return
+return typing.cast(
+SearchResponse,
+parse_obj_as(
+type_=SearchResponse, # type: ignore
+object_=_response.json(),
+),
+)
 if _response.status_code == 400:
-raise BadRequestError(
+raise BadRequestError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 if _response.status_code == 404:
-raise NotFoundError(
+raise NotFoundError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 if _response.status_code == 500:
-raise InternalServerError(
+raise InternalServerError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 _response_json = _response.json()
 except JSONDecodeError:
 raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -851,7 +1077,7 @@ class Vellum:
 actuals: typing.Sequence[SubmitCompletionActualRequest],
 deployment_id: typing.Optional[str] = OMIT,
 deployment_name: typing.Optional[str] = OMIT,
-request_options: typing.Optional[RequestOptions] = None
+request_options: typing.Optional[RequestOptions] = None,
 ) -> None:
 """
 Used to submit feedback regarding the quality of previously generated completions.
@@ -876,8 +1102,7 @@ class Vellum:
 
 Examples
 --------
-from vellum import SubmitCompletionActualRequest
-from vellum.client import Vellum
+from vellum import SubmitCompletionActualRequest, Vellum
 
 client = Vellum(
 api_key="YOUR_API_KEY",
@@ -890,7 +1115,11 @@ class Vellum:
 "v1/submit-completion-actuals",
 base_url=self._client_wrapper.get_environment().predict,
 method="POST",
-json={
+json={
+"deployment_id": deployment_id,
+"deployment_name": deployment_name,
+"actuals": actuals,
+},
 request_options=request_options,
 omit=OMIT,
 )
@@ -898,11 +1127,35 @@ class Vellum:
 if 200 <= _response.status_code < 300:
 return
 if _response.status_code == 400:
-raise BadRequestError(
+raise BadRequestError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 if _response.status_code == 404:
-raise NotFoundError(
+raise NotFoundError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 if _response.status_code == 500:
-raise InternalServerError(
+raise InternalServerError(
+typing.cast(
+typing.Optional[typing.Any],
+parse_obj_as(
+type_=typing.Optional[typing.Any], # type: ignore
+object_=_response.json(),
+),
+)
+)
 _response_json = _response.json()
 except JSONDecodeError:
 raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -914,7 +1167,7 @@ class Vellum:
 actuals: typing.Sequence[SubmitWorkflowExecutionActualRequest],
 execution_id: typing.Optional[str] = OMIT,
 external_id: typing.Optional[str] = OMIT,
-request_options: typing.Optional[RequestOptions] = None
+request_options: typing.Optional[RequestOptions] = None,
 ) -> None:
 """
 Used to submit feedback regarding the quality of previous workflow execution and its outputs.
@@ -941,20 +1194,24 @@ class Vellum:
|
|
941
1194
|
|
942
1195
|
Examples
|
943
1196
|
--------
|
944
|
-
from vellum
|
1197
|
+
from vellum import Vellum, WorkflowExecutionActualStringRequest
|
945
1198
|
|
946
1199
|
client = Vellum(
|
947
1200
|
api_key="YOUR_API_KEY",
|
948
1201
|
)
|
949
1202
|
client.submit_workflow_execution_actuals(
|
950
|
-
actuals=[],
|
1203
|
+
actuals=[WorkflowExecutionActualStringRequest()],
|
951
1204
|
)
|
952
1205
|
"""
|
953
1206
|
_response = self._client_wrapper.httpx_client.request(
|
954
1207
|
"v1/submit-workflow-execution-actuals",
|
955
1208
|
base_url=self._client_wrapper.get_environment().predict,
|
956
1209
|
method="POST",
|
957
|
-
json={
|
1210
|
+
json={
|
1211
|
+
"actuals": actuals,
|
1212
|
+
"execution_id": execution_id,
|
1213
|
+
"external_id": external_id,
|
1214
|
+
},
|
958
1215
|
request_options=request_options,
|
959
1216
|
omit=OMIT,
|
960
1217
|
)
|
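The docstring example above also shows 0.8.4's flattened import surface: Vellum and the request types are imported from vellum directly instead of vellum.client, and the actuals list is populated with a concrete request object. A sketch copied almost verbatim from that example (the fields on WorkflowExecutionActualStringRequest are left at their defaults, as in the docstring):

    from vellum import Vellum, WorkflowExecutionActualStringRequest

    client = Vellum(
        api_key="YOUR_API_KEY",
    )
    client.submit_workflow_execution_actuals(
        actuals=[WorkflowExecutionActualStringRequest()],
    )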
@@ -994,7 +1251,7 @@ class AsyncVellum:

    Examples
    --------
-    from vellum
+    from vellum import AsyncVellum

    client = AsyncVellum(
        api_key="YOUR_API_KEY",
@@ -1008,7 +1265,7 @@ class AsyncVellum:
        api_key: str,
        timeout: typing.Optional[float] = None,
        follow_redirects: typing.Optional[bool] = True,
-        httpx_client: typing.Optional[httpx.AsyncClient] = None
+        httpx_client: typing.Optional[httpx.AsyncClient] = None,
    ):
        _defaulted_timeout = timeout if timeout is not None else None if httpx_client is None else None
        self._client_wrapper = AsyncClientWrapper(
@@ -1043,8 +1300,8 @@ class AsyncVellum:
        expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest] = OMIT,
        raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest] = OMIT,
        expand_raw: typing.Optional[typing.Sequence[str]] = OMIT,
-        metadata: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
    ) -> ExecutePromptResponse:
        """
        Executes a deployed Prompt and returns the result.
@@ -1075,7 +1332,7 @@ class AsyncVellum:
        expand_raw : typing.Optional[typing.Sequence[str]]
            A list of keys whose values you'd like to directly return from the JSON response of the model provider. Useful if you need lower-level info returned by model providers that Vellum would otherwise omit. Corresponding key/value pairs will be returned under the `raw` key of the API response.

-        metadata : typing.Optional[typing.Dict[str, typing.Any]]
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
            Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.

        request_options : typing.Optional[RequestOptions]
@@ -1090,12 +1347,7 @@ class AsyncVellum:
        --------
        import asyncio

-        from vellum import
-            PromptDeploymentExpandMetaRequestRequest,
-            PromptDeploymentInputRequest_String,
-            RawPromptExecutionOverridesRequest,
-        )
-        from vellum.client import AsyncVellum
+        from vellum import AsyncVellum, StringInputRequest

        client = AsyncVellum(
            api_key="YOUR_API_KEY",
@@ -1105,30 +1357,11 @@ class AsyncVellum:
        async def main() -> None:
            await client.execute_prompt(
                inputs=[
-
-                        name="
-                        value="
+                    StringInputRequest(
+                        name="name",
+                        value="value",
                    )
                ],
-                prompt_deployment_id="string",
-                prompt_deployment_name="string",
-                release_tag="string",
-                external_id="string",
-                expand_meta=PromptDeploymentExpandMetaRequestRequest(
-                    model_name=True,
-                    usage=True,
-                    finish_reason=True,
-                    latency=True,
-                    deployment_release_tag=True,
-                    prompt_version_id=True,
-                ),
-                raw_overrides=RawPromptExecutionOverridesRequest(
-                    body={"string": {"key": "value"}},
-                    headers={"string": {"key": "value"}},
-                    url="string",
-                ),
-                expand_raw=["string"],
-                metadata={"string": {"key": "value"}},
            )


@@ -1154,15 +1387,53 @@ class AsyncVellum:
        )
        try:
            if 200 <= _response.status_code < 300:
-                return
+                return typing.cast(
+                    ExecutePromptResponse,
+                    parse_obj_as(
+                        type_=ExecutePromptResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
            if _response.status_code == 400:
-                raise BadRequestError(
+                raise BadRequestError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
            if _response.status_code == 403:
-                raise ForbiddenError(
+                raise ForbiddenError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
            if _response.status_code == 404:
-                raise NotFoundError(
+                raise NotFoundError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
            if _response.status_code == 500:
-                raise InternalServerError(
+                raise InternalServerError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
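Beyond the trailing commas and the parsed responses, the hunks above widen metadata from Dict[str, Any] to Dict[str, Optional[Any]], so explicit None values now satisfy the annotation. A hedged usage sketch (the deployment name and metadata keys are placeholders, not values taken from this diff):

    import asyncio

    from vellum import AsyncVellum, StringInputRequest

    client = AsyncVellum(
        api_key="YOUR_API_KEY",
    )


    async def main() -> None:
        await client.execute_prompt(
            inputs=[
                StringInputRequest(
                    name="name",
                    value="value",
                )
            ],
            prompt_deployment_name="my-deployment",  # placeholder
            metadata={"user_id": "abc-123", "session_id": None},  # None values now type-check
        )


    asyncio.run(main())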
@@ -1179,8 +1450,8 @@ class AsyncVellum:
        expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest] = OMIT,
        raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest] = OMIT,
        expand_raw: typing.Optional[typing.Sequence[str]] = OMIT,
-        metadata: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.AsyncIterator[ExecutePromptEvent]:
        """
        Executes a deployed Prompt and streams back the results.
@@ -1211,7 +1482,7 @@ class AsyncVellum:
        expand_raw : typing.Optional[typing.Sequence[str]]
            A list of keys whose values you'd like to directly return from the JSON response of the model provider. Useful if you need lower-level info returned by model providers that Vellum would otherwise omit. Corresponding key/value pairs will be returned under the `raw` key of the API response.

-        metadata : typing.Optional[typing.Dict[str, typing.Any]]
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
            Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.

        request_options : typing.Optional[RequestOptions]
@@ -1227,11 +1498,11 @@ class AsyncVellum:
        import asyncio

        from vellum import (
+            AsyncVellum,
            PromptDeploymentExpandMetaRequestRequest,
-            PromptDeploymentInputRequest_String,
            RawPromptExecutionOverridesRequest,
+            StringInputRequest,
        )
-        from vellum.client import AsyncVellum

        client = AsyncVellum(
            api_key="YOUR_API_KEY",
@@ -1241,7 +1512,7 @@ class AsyncVellum:
        async def main() -> None:
            response = await client.execute_prompt_stream(
                inputs=[
-
+                    StringInputRequest(
                        name="string",
                        value="string",
                    )
@@ -1296,19 +1567,57 @@ class AsyncVellum:
                        try:
                            if len(_text) == 0:
                                continue
-                            yield
+                            yield typing.cast(
+                                ExecutePromptEvent,
+                                parse_obj_as(
+                                    type_=ExecutePromptEvent,  # type: ignore
+                                    object_=json.loads(_text),
+                                ),
+                            )
                        except:
                            pass
                    return
                await _response.aread()
                if _response.status_code == 400:
-                    raise BadRequestError(
+                    raise BadRequestError(
+                        typing.cast(
+                            typing.Optional[typing.Any],
+                            parse_obj_as(
+                                type_=typing.Optional[typing.Any],  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
+                    )
                if _response.status_code == 403:
-                    raise ForbiddenError(
+                    raise ForbiddenError(
+                        typing.cast(
+                            typing.Optional[typing.Any],
+                            parse_obj_as(
+                                type_=typing.Optional[typing.Any],  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
+                    )
                if _response.status_code == 404:
-                    raise NotFoundError(
+                    raise NotFoundError(
+                        typing.cast(
+                            typing.Optional[typing.Any],
+                            parse_obj_as(
+                                type_=typing.Optional[typing.Any],  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
+                    )
                if _response.status_code == 500:
-                    raise InternalServerError(
+                    raise InternalServerError(
+                        typing.cast(
+                            typing.Optional[typing.Any],
+                            parse_obj_as(
+                                type_=typing.Optional[typing.Any],  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
+                    )
                _response_json = _response.json()
            except JSONDecodeError:
                raise ApiError(status_code=_response.status_code, body=_response.text)
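Since the streaming body above is an async generator (note the yield), each parsed ExecutePromptEvent can be consumed with async for. A minimal sketch reusing the placeholder inputs from the docstring example:

    import asyncio

    from vellum import AsyncVellum, StringInputRequest

    client = AsyncVellum(
        api_key="YOUR_API_KEY",
    )


    async def main() -> None:
        async for event in client.execute_prompt_stream(
            inputs=[StringInputRequest(name="string", value="string")],
            prompt_deployment_name="string",  # placeholder
        ):
            # Each item is already parsed into an ExecutePromptEvent by the
            # generated yield above; printing is enough for a smoke test.
            print(event)


    asyncio.run(main())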
@@ -1323,7 +1632,7 @@ class AsyncVellum:
        workflow_deployment_name: typing.Optional[str] = OMIT,
        release_tag: typing.Optional[str] = OMIT,
        external_id: typing.Optional[str] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None
+        request_options: typing.Optional[RequestOptions] = None,
    ) -> ExecuteWorkflowResponse:
        """
        Executes a deployed Workflow and returns its outputs.
@@ -1360,8 +1669,7 @@ class AsyncVellum:
        --------
        import asyncio

-        from vellum import
-        from vellum.client import AsyncVellum
+        from vellum import AsyncVellum, WorkflowRequestStringInputRequest

        client = AsyncVellum(
            api_key="YOUR_API_KEY",
@@ -1371,18 +1679,11 @@ class AsyncVellum:
        async def main() -> None:
            await client.execute_workflow(
                inputs=[
-
-                        name="
-                        value="
+                    WorkflowRequestStringInputRequest(
+                        name="name",
+                        value="value",
                    )
                ],
-                expand_meta=WorkflowExpandMetaRequest(
-                    usage=True,
-                ),
-                workflow_deployment_id="string",
-                workflow_deployment_name="string",
-                release_tag="string",
-                external_id="string",
            )


@@ -1405,13 +1706,43 @@ class AsyncVellum:
        )
        try:
            if 200 <= _response.status_code < 300:
-                return
+                return typing.cast(
+                    ExecuteWorkflowResponse,
+                    parse_obj_as(
+                        type_=ExecuteWorkflowResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
            if _response.status_code == 400:
-                raise BadRequestError(
+                raise BadRequestError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
            if _response.status_code == 404:
-                raise NotFoundError(
+                raise NotFoundError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
            if _response.status_code == 500:
-                raise InternalServerError(
+                raise InternalServerError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -1427,7 +1758,7 @@ class AsyncVellum:
        release_tag: typing.Optional[str] = OMIT,
        external_id: typing.Optional[str] = OMIT,
        event_types: typing.Optional[typing.Sequence[WorkflowExecutionEventType]] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None
+        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.AsyncIterator[WorkflowStreamEvent]:
        """
        Executes a deployed Workflow and streams back its results.
@@ -1467,8 +1798,11 @@ class AsyncVellum:
        --------
        import asyncio

-        from vellum import
-
+        from vellum import (
+            AsyncVellum,
+            WorkflowExpandMetaRequest,
+            WorkflowRequestStringInputRequest,
+        )

        client = AsyncVellum(
            api_key="YOUR_API_KEY",
@@ -1478,7 +1812,7 @@ class AsyncVellum:
        async def main() -> None:
            response = await client.execute_workflow_stream(
                inputs=[
-
+                    WorkflowRequestStringInputRequest(
                        name="string",
                        value="string",
                    )
@@ -1520,17 +1854,47 @@ class AsyncVellum:
                        try:
                            if len(_text) == 0:
                                continue
-                            yield
+                            yield typing.cast(
+                                WorkflowStreamEvent,
+                                parse_obj_as(
+                                    type_=WorkflowStreamEvent,  # type: ignore
+                                    object_=json.loads(_text),
+                                ),
+                            )
                        except:
                            pass
                    return
                await _response.aread()
                if _response.status_code == 400:
-                    raise BadRequestError(
+                    raise BadRequestError(
+                        typing.cast(
+                            typing.Optional[typing.Any],
+                            parse_obj_as(
+                                type_=typing.Optional[typing.Any],  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
+                    )
                if _response.status_code == 404:
-                    raise NotFoundError(
+                    raise NotFoundError(
+                        typing.cast(
+                            typing.Optional[typing.Any],
+                            parse_obj_as(
+                                type_=typing.Optional[typing.Any],  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
+                    )
                if _response.status_code == 500:
-                    raise InternalServerError(
+                    raise InternalServerError(
+                        typing.cast(
+                            typing.Optional[typing.Any],
+                            parse_obj_as(
+                                type_=typing.Optional[typing.Any],  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
+                    )
                _response_json = _response.json()
            except JSONDecodeError:
                raise ApiError(status_code=_response.status_code, body=_response.text)
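execute_workflow_stream gets the same treatment: parsed WorkflowStreamEvent objects are yielded directly. A hedged consumption sketch using the types from the import block above (the deployment name is a placeholder; expand_meta mirrors the WorkflowExpandMetaRequest(usage=True) call removed from the execute_workflow example):

    import asyncio

    from vellum import (
        AsyncVellum,
        WorkflowExpandMetaRequest,
        WorkflowRequestStringInputRequest,
    )

    client = AsyncVellum(
        api_key="YOUR_API_KEY",
    )


    async def main() -> None:
        async for event in client.execute_workflow_stream(
            inputs=[
                WorkflowRequestStringInputRequest(
                    name="string",
                    value="string",
                )
            ],
            workflow_deployment_name="string",  # placeholder
            expand_meta=WorkflowExpandMetaRequest(usage=True),
        ):
            print(event)


    asyncio.run(main())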
@@ -1543,7 +1907,7 @@ class AsyncVellum:
        deployment_id: typing.Optional[str] = OMIT,
        deployment_name: typing.Optional[str] = OMIT,
        options: typing.Optional[GenerateOptionsRequest] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None
+        request_options: typing.Optional[RequestOptions] = None,
    ) -> GenerateResponse:
        """
        Generate a completion using a previously defined deployment.
@@ -1577,8 +1941,7 @@ class AsyncVellum:
        --------
        import asyncio

-        from vellum import GenerateRequest
-        from vellum.client import AsyncVellum
+        from vellum import AsyncVellum, GenerateRequest

        client = AsyncVellum(
            api_key="YOUR_API_KEY",
@@ -1612,15 +1975,53 @@ class AsyncVellum:
        )
        try:
            if 200 <= _response.status_code < 300:
-                return
+                return typing.cast(
+                    GenerateResponse,
+                    parse_obj_as(
+                        type_=GenerateResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
            if _response.status_code == 400:
-                raise BadRequestError(
+                raise BadRequestError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
            if _response.status_code == 403:
-                raise ForbiddenError(
+                raise ForbiddenError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
            if _response.status_code == 404:
-                raise NotFoundError(
+                raise NotFoundError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
            if _response.status_code == 500:
-                raise InternalServerError(
+                raise InternalServerError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -1633,7 +2034,7 @@ class AsyncVellum:
        deployment_id: typing.Optional[str] = OMIT,
        deployment_name: typing.Optional[str] = OMIT,
        options: typing.Optional[GenerateOptionsRequest] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None
+        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.AsyncIterator[GenerateStreamResponse]:
        """
        Generate a stream of completions using a previously defined deployment.
@@ -1668,12 +2069,12 @@ class AsyncVellum:
        import asyncio

        from vellum import (
-
+            AsyncVellum,
            ChatMessageRequest,
            GenerateOptionsRequest,
            GenerateRequest,
+            StringChatMessageContentRequest,
        )
-        from vellum.client import AsyncVellum

        client = AsyncVellum(
            api_key="YOUR_API_KEY",
@@ -1691,7 +2092,7 @@ class AsyncVellum:
                ChatMessageRequest(
                    text="string",
                    role="SYSTEM",
-                    content=
+                    content=StringChatMessageContentRequest(),
                    source="string",
                )
            ],
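The chat-history example above now fills content with StringChatMessageContentRequest() instead of leaving content= dangling. A standalone sketch of the same message shape, hedged in that the no-argument constructor is taken directly from the docstring example rather than from real usage:

    from vellum import ChatMessageRequest, StringChatMessageContentRequest

    message = ChatMessageRequest(
        text="string",
        role="SYSTEM",
        content=StringChatMessageContentRequest(),
        source="string",
    )
    print(message)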
@@ -1727,19 +2128,57 @@ class AsyncVellum:
                        try:
                            if len(_text) == 0:
                                continue
-                            yield
+                            yield typing.cast(
+                                GenerateStreamResponse,
+                                parse_obj_as(
+                                    type_=GenerateStreamResponse,  # type: ignore
+                                    object_=json.loads(_text),
+                                ),
+                            )
                        except:
                            pass
                    return
                await _response.aread()
                if _response.status_code == 400:
-                    raise BadRequestError(
+                    raise BadRequestError(
+                        typing.cast(
+                            typing.Optional[typing.Any],
+                            parse_obj_as(
+                                type_=typing.Optional[typing.Any],  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
+                    )
                if _response.status_code == 403:
-                    raise ForbiddenError(
+                    raise ForbiddenError(
+                        typing.cast(
+                            typing.Optional[typing.Any],
+                            parse_obj_as(
+                                type_=typing.Optional[typing.Any],  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
+                    )
                if _response.status_code == 404:
-                    raise NotFoundError(
+                    raise NotFoundError(
+                        typing.cast(
+                            typing.Optional[typing.Any],
+                            parse_obj_as(
+                                type_=typing.Optional[typing.Any],  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
+                    )
                if _response.status_code == 500:
-                    raise InternalServerError(
+                    raise InternalServerError(
+                        typing.cast(
+                            typing.Optional[typing.Any],
+                            parse_obj_as(
+                                type_=typing.Optional[typing.Any],  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
+                    )
                _response_json = _response.json()
            except JSONDecodeError:
                raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -1752,7 +2191,7 @@ class AsyncVellum:
        index_id: typing.Optional[str] = OMIT,
        index_name: typing.Optional[str] = OMIT,
        options: typing.Optional[SearchRequestOptionsRequest] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None
+        request_options: typing.Optional[RequestOptions] = None,
    ) -> SearchResponse:
        """
        Perform a search against a document index.
@@ -1783,7 +2222,7 @@ class AsyncVellum:
        --------
        import asyncio

-        from vellum
+        from vellum import AsyncVellum

        client = AsyncVellum(
            api_key="YOUR_API_KEY",
@@ -1802,19 +2241,54 @@ class AsyncVellum:
            "v1/search",
            base_url=self._client_wrapper.get_environment().predict,
            method="POST",
-            json={
+            json={
+                "index_id": index_id,
+                "index_name": index_name,
+                "query": query,
+                "options": options,
+            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
-                return
+                return typing.cast(
+                    SearchResponse,
+                    parse_obj_as(
+                        type_=SearchResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
            if _response.status_code == 400:
-                raise BadRequestError(
+                raise BadRequestError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
            if _response.status_code == 404:
-                raise NotFoundError(
+                raise NotFoundError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
            if _response.status_code == 500:
-                raise InternalServerError(
+                raise InternalServerError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
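The search body above now assembles index_id, index_name, query, and options explicitly and parses the result into a SearchResponse. A hedged usage sketch (the query text and index name are placeholders):

    import asyncio

    from vellum import AsyncVellum

    client = AsyncVellum(
        api_key="YOUR_API_KEY",
    )


    async def main() -> None:
        # `query` plus either index_id or index_name, matching the request
        # body assembled above.
        results = await client.search(
            query="How do I reset my password?",
            index_name="my-document-index",  # placeholder
        )
        print(results)


    asyncio.run(main())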
@@ -1826,7 +2300,7 @@ class AsyncVellum:
        actuals: typing.Sequence[SubmitCompletionActualRequest],
        deployment_id: typing.Optional[str] = OMIT,
        deployment_name: typing.Optional[str] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None
+        request_options: typing.Optional[RequestOptions] = None,
    ) -> None:
        """
        Used to submit feedback regarding the quality of previously generated completions.
@@ -1853,8 +2327,7 @@ class AsyncVellum:
        --------
        import asyncio

-        from vellum import SubmitCompletionActualRequest
-        from vellum.client import AsyncVellum
+        from vellum import AsyncVellum, SubmitCompletionActualRequest

        client = AsyncVellum(
            api_key="YOUR_API_KEY",
@@ -1873,7 +2346,11 @@ class AsyncVellum:
            "v1/submit-completion-actuals",
            base_url=self._client_wrapper.get_environment().predict,
            method="POST",
-            json={
+            json={
+                "deployment_id": deployment_id,
+                "deployment_name": deployment_name,
+                "actuals": actuals,
+            },
            request_options=request_options,
            omit=OMIT,
        )
@@ -1881,11 +2358,35 @@ class AsyncVellum:
            if 200 <= _response.status_code < 300:
                return
            if _response.status_code == 400:
-                raise BadRequestError(
+                raise BadRequestError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
            if _response.status_code == 404:
-                raise NotFoundError(
+                raise NotFoundError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
            if _response.status_code == 500:
-                raise InternalServerError(
+                raise InternalServerError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -1897,7 +2398,7 @@ class AsyncVellum:
        actuals: typing.Sequence[SubmitWorkflowExecutionActualRequest],
        execution_id: typing.Optional[str] = OMIT,
        external_id: typing.Optional[str] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None
+        request_options: typing.Optional[RequestOptions] = None,
    ) -> None:
        """
        Used to submit feedback regarding the quality of previous workflow execution and its outputs.
@@ -1926,7 +2427,7 @@ class AsyncVellum:
        --------
        import asyncio

-        from vellum
+        from vellum import AsyncVellum, WorkflowExecutionActualStringRequest

        client = AsyncVellum(
            api_key="YOUR_API_KEY",
@@ -1935,7 +2436,7 @@ class AsyncVellum:

        async def main() -> None:
            await client.submit_workflow_execution_actuals(
-                actuals=[],
+                actuals=[WorkflowExecutionActualStringRequest()],
            )


@@ -1945,7 +2446,11 @@ class AsyncVellum:
            "v1/submit-workflow-execution-actuals",
            base_url=self._client_wrapper.get_environment().predict,
            method="POST",
-            json={
+            json={
+                "actuals": actuals,
+                "execution_id": execution_id,
+                "external_id": external_id,
+            },
            request_options=request_options,
            omit=OMIT,
        )