vellum-ai 0.6.9__tar.gz → 0.7.1__tar.gz
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/PKG-INFO +1 -1
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/pyproject.toml +1 -1
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/__init__.py +14 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/client.py +28 -28
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/core/client_wrapper.py +1 -1
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/lib/test_suites/resources.py +5 -5
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/document_indexes/client.py +114 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/test_suites/client.py +19 -51
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/__init__.py +14 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/code_execution_node_json_result.py +1 -1
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/execution_json_vellum_value.py +1 -1
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/json_variable_value.py +1 -1
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/json_vellum_value.py +1 -1
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/merge_node_result.py +3 -0
- vellum_ai-0.7.1/src/vellum/types/merge_node_result_data.py +25 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/named_test_case_json_variable_value.py +1 -1
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/named_test_case_json_variable_value_request.py +1 -1
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/node_input_compiled_json_value.py +1 -1
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/node_output_compiled_json_value.py +1 -1
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/prompt_node_result_data.py +1 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/templating_node_json_result.py +1 -1
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/terminal_node_json_result.py +1 -1
- vellum_ai-0.7.1/src/vellum/types/test_suite_run_execution_array_output.py +32 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_execution_json_output.py +1 -1
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_execution_output.py +12 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_metric_number_output.py +1 -1
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_metric_string_output.py +1 -1
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_test_case_bulk_operation_request.py +12 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_test_case_rejected_bulk_result.py +1 -1
- vellum_ai-0.7.1/src/vellum/types/test_suite_test_case_upsert_bulk_operation_request.py +35 -0
- vellum_ai-0.7.1/src/vellum/types/upsert_enum.py +5 -0
- vellum_ai-0.7.1/src/vellum/types/upsert_test_suite_test_case_request.py +49 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_output_json.py +1 -1
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_request_json_input_request.py +1 -1
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_result_event_output_data_json.py +1 -1
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/LICENSE +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/README.md +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/core/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/core/api_error.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/core/datetime_utils.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/core/file.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/core/http_client.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/core/jsonable_encoder.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/core/pydantic_utilities.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/core/remove_none_from_dict.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/core/request_options.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/environment.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/errors/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/errors/bad_request_error.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/errors/forbidden_error.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/errors/internal_server_error.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/errors/not_found_error.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/lib/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/lib/test_suites/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/lib/test_suites/constants.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/lib/test_suites/exceptions.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/lib/utils/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/lib/utils/env.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/lib/utils/exceptions.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/lib/utils/paginator.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/py.typed +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/deployments/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/deployments/client.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/deployments/types/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/deployments/types/deployments_list_request_status.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/document_indexes/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/document_indexes/types/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/document_indexes/types/document_indexes_list_request_status.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/documents/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/documents/client.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/folder_entities/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/folder_entities/client.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/sandboxes/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/sandboxes/client.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/test_suite_runs/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/test_suite_runs/client.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/test_suites/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/workflow_deployments/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/workflow_deployments/client.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/workflow_deployments/types/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/workflow_deployments/types/workflow_deployments_list_request_status.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/workflow_sandboxes/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/resources/workflow_sandboxes/client.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/terraform/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/terraform/_jsii/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/terraform/_jsii/vellum-ai_vellum@0.0.0.jsii.tgz +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/terraform/constraints.json +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/terraform/data_vellum_document_index/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/terraform/document_index/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/terraform/provider/__init__.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/terraform/py.typed +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/terraform/versions.json +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/add_openai_api_key_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/api_node_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/api_node_result_data.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/array_chat_message_content.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/array_chat_message_content_item.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/array_chat_message_content_item_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/array_chat_message_content_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/array_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/array_variable_value_item.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/array_vellum_value_item.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/basic_vectorizer_intfloat_multilingual_e_5_large.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/basic_vectorizer_intfloat_multilingual_e_5_large_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/basic_vectorizer_sentence_transformers_multi_qa_mpnet_base_cos_v_1.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/basic_vectorizer_sentence_transformers_multi_qa_mpnet_base_cos_v_1_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/basic_vectorizer_sentence_transformers_multi_qa_mpnet_base_dot_v_1.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/basic_vectorizer_sentence_transformers_multi_qa_mpnet_base_dot_v_1_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/chat_history_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/chat_history_input_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/chat_message.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/chat_message_content.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/chat_message_content_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/chat_message_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/chat_message_role.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/code_execution_node_array_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/code_execution_node_chat_history_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/code_execution_node_error_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/code_execution_node_function_call_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/code_execution_node_number_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/code_execution_node_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/code_execution_node_result_data.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/code_execution_node_result_output.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/code_execution_node_search_results_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/code_execution_node_string_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/conditional_node_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/conditional_node_result_data.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/create_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/create_test_suite_test_case_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/created_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/delete_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/deleted_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/deployment_provider_payload_response.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/deployment_provider_payload_response_payload.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/deployment_read.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/deployment_release_tag_deployment_history_item.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/deployment_release_tag_read.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/document_document_to_document_index.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/document_index_chunking.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/document_index_chunking_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/document_index_indexing_config.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/document_index_indexing_config_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/document_index_read.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/document_read.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/document_status.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/enriched_normalized_completion.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/entity_status.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/environment_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/error_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/error_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/error_vellum_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/execute_prompt_api_error_response.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/execute_prompt_event.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/execute_prompt_response.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/execute_workflow_error_response.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/execute_workflow_response.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/execute_workflow_stream_error_response.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/execute_workflow_workflow_result_event.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/execution_array_vellum_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/execution_chat_history_vellum_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/execution_error_vellum_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/execution_function_call_vellum_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/execution_number_vellum_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/execution_search_results_vellum_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/execution_string_vellum_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/execution_vellum_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/external_test_case_execution.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/external_test_case_execution_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/finish_reason_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/fulfilled_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/fulfilled_execute_prompt_event.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/fulfilled_execute_prompt_response.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/fulfilled_execute_workflow_workflow_result_event.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/fulfilled_prompt_execution_meta.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/fulfilled_workflow_node_result_event.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/function_call.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/function_call_chat_message_content.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/function_call_chat_message_content_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/function_call_chat_message_content_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/function_call_chat_message_content_value_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/function_call_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/function_call_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/function_call_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/function_call_vellum_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/generate_error_response.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/generate_options_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/generate_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/generate_response.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/generate_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/generate_result_data.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/generate_result_error.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/generate_stream_response.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/generate_stream_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/generate_stream_result_data.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/hkunlp_instructor_xl_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/hkunlp_instructor_xl_vectorizer.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/hkunlp_instructor_xl_vectorizer_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/image_chat_message_content.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/image_chat_message_content_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/image_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/image_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/image_vellum_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/indexing_config_vectorizer.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/indexing_config_vectorizer_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/indexing_state_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/initiated_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/initiated_execute_prompt_event.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/initiated_prompt_execution_meta.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/initiated_workflow_node_result_event.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/instructor_vectorizer_config.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/instructor_vectorizer_config_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/intfloat_multilingual_e_5_large_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/iteration_state_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/json_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/json_input_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/logical_operator.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/logprobs_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/map_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/map_node_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/map_node_result_data.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/merge_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/metadata_filter_config_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/metadata_filter_rule_combinator.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/metadata_filter_rule_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/metric_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/metric_node_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/ml_model_usage.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/named_scenario_input_chat_history_variable_value_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/named_scenario_input_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/named_scenario_input_string_variable_value_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/named_test_case_chat_history_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/named_test_case_chat_history_variable_value_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/named_test_case_error_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/named_test_case_error_variable_value_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/named_test_case_function_call_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/named_test_case_function_call_variable_value_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/named_test_case_number_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/named_test_case_number_variable_value_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/named_test_case_search_results_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/named_test_case_search_results_variable_value_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/named_test_case_string_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/named_test_case_string_variable_value_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/named_test_case_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/named_test_case_variable_value_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/node_input_compiled_array_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/node_input_compiled_chat_history_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/node_input_compiled_error_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/node_input_compiled_function_call.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/node_input_compiled_number_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/node_input_compiled_search_results_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/node_input_compiled_string_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/node_input_variable_compiled_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/node_output_compiled_array_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/node_output_compiled_chat_history_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/node_output_compiled_error_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/node_output_compiled_function_call_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/node_output_compiled_number_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/node_output_compiled_search_results_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/node_output_compiled_string_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/node_output_compiled_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/normalized_log_probs.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/normalized_token_log_probs.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/number_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/number_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/number_vellum_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/open_ai_vectorizer_config.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/open_ai_vectorizer_config_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/open_ai_vectorizer_text_embedding_3_large.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/open_ai_vectorizer_text_embedding_3_large_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/open_ai_vectorizer_text_embedding_3_small.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/open_ai_vectorizer_text_embedding_3_small_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/open_ai_vectorizer_text_embedding_ada_002.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/open_ai_vectorizer_text_embedding_ada_002_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/paginated_document_index_read_list.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/paginated_slim_deployment_read_list.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/paginated_slim_document_list.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/paginated_slim_workflow_deployment_list.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/paginated_test_suite_run_execution_list.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/paginated_test_suite_test_case_list.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/pdf_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/pdf_search_result_meta_source.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/pdf_search_result_meta_source_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/processing_failure_reason_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/processing_state_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/prompt_deployment_expand_meta_request_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/prompt_deployment_input_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/prompt_execution_meta.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/prompt_node_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/prompt_output.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/raw_prompt_execution_overrides_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/reducto_chunker_config.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/reducto_chunker_config_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/reducto_chunker_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/reducto_chunking.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/reducto_chunking_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/rejected_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/rejected_execute_prompt_event.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/rejected_execute_prompt_response.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/rejected_execute_workflow_workflow_result_event.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/rejected_prompt_execution_meta.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/rejected_workflow_node_result_event.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/release_tag_source.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/replace_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/replace_test_suite_test_case_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/replaced_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/sandbox_scenario.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/scenario_input.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/scenario_input_chat_history_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/scenario_input_string_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/search_error_response.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/search_filters_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/search_node_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/search_node_result_data.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/search_request_options_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/search_response.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/search_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/search_result_document.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/search_result_document_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/search_result_merging_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/search_result_meta.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/search_result_meta_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/search_result_meta_source.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/search_result_meta_source_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/search_result_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/search_results_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/search_weights_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/sentence_chunker_config.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/sentence_chunker_config_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/sentence_chunker_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/sentence_chunking.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/sentence_chunking_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/sentence_transformers_multi_qa_mpnet_base_cos_v_1_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/sentence_transformers_multi_qa_mpnet_base_dot_v_1_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/slim_deployment_read.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/slim_document.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/slim_workflow_deployment.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/streaming_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/streaming_execute_prompt_event.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/streaming_prompt_execution_meta.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/streaming_workflow_node_result_event.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/string_chat_message_content.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/string_chat_message_content_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/string_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/string_input_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/string_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/string_vellum_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/submit_completion_actual_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/submit_completion_actuals_error_response.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/submit_workflow_execution_actual_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/subworkflow_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/subworkflow_node_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/subworkflow_node_result_data.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/templating_node_array_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/templating_node_chat_history_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/templating_node_error_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/templating_node_function_call_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/templating_node_number_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/templating_node_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/templating_node_result_data.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/templating_node_result_output.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/templating_node_search_results_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/templating_node_string_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/terminal_node_array_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/terminal_node_chat_history_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/terminal_node_error_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/terminal_node_function_call_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/terminal_node_number_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/terminal_node_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/terminal_node_result_data.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/terminal_node_result_output.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/terminal_node_search_results_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/terminal_node_string_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_case_chat_history_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_case_error_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_case_function_call_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_case_json_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_case_number_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_case_search_results_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_case_string_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_case_variable_value.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_deployment_release_tag_exec_config.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_deployment_release_tag_exec_config_data.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_deployment_release_tag_exec_config_data_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_deployment_release_tag_exec_config_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_deployment_release_tag_exec_config_type_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_exec_config.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_exec_config_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_execution.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_execution_chat_history_output.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_execution_error_output.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_execution_function_call_output.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_execution_metric_definition.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_execution_metric_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_execution_number_output.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_execution_search_results_output.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_execution_string_output.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_external_exec_config.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_external_exec_config_data.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_external_exec_config_data_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_external_exec_config_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_external_exec_config_type_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_metric_error_output.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_metric_error_output_type_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_metric_number_output_type_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_metric_output.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_metric_string_output_type_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_read.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_state.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_test_suite.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_workflow_release_tag_exec_config.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_workflow_release_tag_exec_config_data.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_workflow_release_tag_exec_config_data_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_workflow_release_tag_exec_config_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_run_workflow_release_tag_exec_config_type_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_test_case.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_test_case_bulk_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_test_case_create_bulk_operation_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_test_case_created_bulk_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_test_case_created_bulk_result_data.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_test_case_delete_bulk_operation_data_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_test_case_delete_bulk_operation_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_test_case_deleted_bulk_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_test_case_deleted_bulk_result_data.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_test_case_replace_bulk_operation_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_test_case_replaced_bulk_result.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/test_suite_test_case_replaced_bulk_result_data.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/text_embedding_3_large_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/text_embedding_3_small_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/text_embedding_ada_002_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/token_overlapping_window_chunker_config.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/token_overlapping_window_chunker_config_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/token_overlapping_window_chunker_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/token_overlapping_window_chunking.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/token_overlapping_window_chunking_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/upload_document_error_response.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/upload_document_response.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/vellum_error.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/vellum_error_code_enum.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/vellum_error_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/vellum_image.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/vellum_image_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/vellum_variable.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/vellum_variable_type.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_deployment_read.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_event_error.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_execution_actual_chat_history_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_execution_actual_json_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_execution_actual_string_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_execution_event_error_code.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_execution_event_type.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_execution_node_result_event.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_execution_workflow_result_event.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_node_result_data.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_node_result_event.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_node_result_event_state.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_output.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_output_array.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_output_chat_history.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_output_error.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_output_function_call.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_output_image.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_output_number.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_output_search_results.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_output_string.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_release_tag_read.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_release_tag_workflow_deployment_history_item.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_request_chat_history_input_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_request_input_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_request_number_input_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_request_string_input_request.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_result_event.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_result_event_output_data.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_result_event_output_data_array.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_result_event_output_data_chat_history.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_result_event_output_data_error.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_result_event_output_data_function_call.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_result_event_output_data_number.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_result_event_output_data_search_results.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_result_event_output_data_string.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/types/workflow_stream_event.py +0 -0
- {vellum_ai-0.6.9 → vellum_ai-0.7.1}/src/vellum/version.py +0 -0
@@ -205,6 +205,7 @@ from .types import (
     MapNodeResultData,
     MergeEnum,
     MergeNodeResult,
+    MergeNodeResultData,
     MetadataFilterConfigRequest,
     MetadataFilterRuleCombinator,
     MetadataFilterRuleRequest,
@@ -454,6 +455,7 @@ from .types import (
     TestSuiteRunExecConfig_External,
     TestSuiteRunExecConfig_WorkflowReleaseTag,
     TestSuiteRunExecution,
+    TestSuiteRunExecutionArrayOutput,
     TestSuiteRunExecutionChatHistoryOutput,
     TestSuiteRunExecutionErrorOutput,
     TestSuiteRunExecutionFunctionCallOutput,
@@ -462,6 +464,7 @@ from .types import (
     TestSuiteRunExecutionMetricResult,
     TestSuiteRunExecutionNumberOutput,
     TestSuiteRunExecutionOutput,
+    TestSuiteRunExecutionOutput_Array,
     TestSuiteRunExecutionOutput_ChatHistory,
     TestSuiteRunExecutionOutput_Error,
     TestSuiteRunExecutionOutput_FunctionCall,
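The two additions above (TestSuiteRunExecutionArrayOutput and the TestSuiteRunExecutionOutput_Array union variant) mean test suite run executions can now carry ARRAY-typed outputs alongside the existing chat-history, error, number, search-results, and string variants. Below is a minimal sketch of handling them; the shape of the execution object and the `.value` field on the new variant are assumptions modeled on the sibling output types, not verified against this release.

# Sketch only: assumes `execution` is a TestSuiteRunExecution retrieved from the
# test suite runs API, and that the new variant exposes its items via `.value`
# like the sibling output variants do.
from typing import List

from vellum import TestSuiteRunExecution, TestSuiteRunExecutionOutput_Array


def collect_array_outputs(execution: TestSuiteRunExecution) -> List[list]:
    """Pull the values of any ARRAY-typed outputs out of a single execution."""
    arrays: List[list] = []
    for output in execution.outputs:
        if isinstance(output, TestSuiteRunExecutionOutput_Array):
            arrays.append(output.value)  # assumed: a list of Vellum values
    return arrays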
@@ -499,6 +502,7 @@ from .types import (
     TestSuiteTestCaseBulkOperationRequest_Create,
     TestSuiteTestCaseBulkOperationRequest_Delete,
     TestSuiteTestCaseBulkOperationRequest_Replace,
+    TestSuiteTestCaseBulkOperationRequest_Upsert,
     TestSuiteTestCaseBulkResult,
     TestSuiteTestCaseBulkResult_Created,
     TestSuiteTestCaseBulkResult_Deleted,
@@ -515,6 +519,7 @@ from .types import (
     TestSuiteTestCaseReplaceBulkOperationRequest,
     TestSuiteTestCaseReplacedBulkResult,
     TestSuiteTestCaseReplacedBulkResultData,
+    TestSuiteTestCaseUpsertBulkOperationRequest,
     TextEmbedding3LargeEnum,
     TextEmbedding3SmallEnum,
     TextEmbeddingAda002Enum,
@@ -525,6 +530,8 @@ from .types import (
     TokenOverlappingWindowChunkingRequest,
     UploadDocumentErrorResponse,
     UploadDocumentResponse,
+    UpsertEnum,
+    UpsertTestSuiteTestCaseRequest,
     VellumError,
     VellumErrorCodeEnum,
     VellumErrorRequest,
@@ -839,6 +846,7 @@ __all__ = [
     "MapNodeResultData",
     "MergeEnum",
     "MergeNodeResult",
+    "MergeNodeResultData",
     "MetadataFilterConfigRequest",
     "MetadataFilterRuleCombinator",
     "MetadataFilterRuleRequest",
@@ -1089,6 +1097,7 @@ __all__ = [
     "TestSuiteRunExecConfig_External",
     "TestSuiteRunExecConfig_WorkflowReleaseTag",
     "TestSuiteRunExecution",
+    "TestSuiteRunExecutionArrayOutput",
     "TestSuiteRunExecutionChatHistoryOutput",
     "TestSuiteRunExecutionErrorOutput",
     "TestSuiteRunExecutionFunctionCallOutput",
@@ -1097,6 +1106,7 @@ __all__ = [
     "TestSuiteRunExecutionMetricResult",
     "TestSuiteRunExecutionNumberOutput",
     "TestSuiteRunExecutionOutput",
+    "TestSuiteRunExecutionOutput_Array",
     "TestSuiteRunExecutionOutput_ChatHistory",
     "TestSuiteRunExecutionOutput_Error",
     "TestSuiteRunExecutionOutput_FunctionCall",
@@ -1134,6 +1144,7 @@ __all__ = [
     "TestSuiteTestCaseBulkOperationRequest_Create",
     "TestSuiteTestCaseBulkOperationRequest_Delete",
     "TestSuiteTestCaseBulkOperationRequest_Replace",
+    "TestSuiteTestCaseBulkOperationRequest_Upsert",
     "TestSuiteTestCaseBulkResult",
     "TestSuiteTestCaseBulkResult_Created",
     "TestSuiteTestCaseBulkResult_Deleted",
@@ -1150,6 +1161,7 @@ __all__ = [
     "TestSuiteTestCaseReplaceBulkOperationRequest",
     "TestSuiteTestCaseReplacedBulkResult",
     "TestSuiteTestCaseReplacedBulkResultData",
+    "TestSuiteTestCaseUpsertBulkOperationRequest",
     "TextEmbedding3LargeEnum",
     "TextEmbedding3SmallEnum",
     "TextEmbeddingAda002Enum",
@@ -1160,6 +1172,8 @@ __all__ = [
     "TokenOverlappingWindowChunkingRequest",
     "UploadDocumentErrorResponse",
     "UploadDocumentResponse",
+    "UpsertEnum",
+    "UpsertTestSuiteTestCaseRequest",
     "VellumEnvironment",
     "VellumError",
     "VellumErrorCodeEnum",
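The remaining export additions (UpsertEnum, UpsertTestSuiteTestCaseRequest, TestSuiteTestCaseUpsertBulkOperationRequest, and the TestSuiteTestCaseBulkOperationRequest_Upsert variant) introduce an upsert flavor of the test suite test case bulk operations handled by resources/test_suites/client.py. A minimal sketch of building such an operation follows; the field names (id, data, external_id, label, input_values, evaluation_values) and the string variable-value shape are assumptions modeled on the existing create/replace operation types, not verified against this release.

# Sketch only: field names are assumed by analogy with the existing create and
# replace bulk operation request types; check the generated models before use.
from vellum import (
    NamedTestCaseStringVariableValueRequest,
    TestSuiteTestCaseBulkOperationRequest_Upsert,
    UpsertTestSuiteTestCaseRequest,
)

# Upsert keyed on an external id: create the test case if it is new, otherwise
# replace the existing one in place.
operation = TestSuiteTestCaseBulkOperationRequest_Upsert(
    id="op-1",  # assumed: client-chosen id for correlating bulk results
    data=UpsertTestSuiteTestCaseRequest(
        external_id="case-123",
        label="Greeting case",
        input_values=[
            NamedTestCaseStringVariableValueRequest(name="question", value="Hello?"),
        ],
        evaluation_values=[
            NamedTestCaseStringVariableValueRequest(name="expected", value="Hi there!"),
        ],
    ),
)
# A list of such operations is what the bulk endpoint in
# resources/test_suites/client.py accepts.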
@@ -121,23 +121,23 @@ class Vellum:
         Executes a deployed Prompt and returns the result.

         Parameters:
-            - inputs: typing.Sequence[PromptDeploymentInputRequest].
+            - inputs: typing.Sequence[PromptDeploymentInputRequest]. A list consisting of the Prompt Deployment's input variables and their values.

             - prompt_deployment_id: typing.Optional[str]. The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.

-            - prompt_deployment_name: typing.Optional[str]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+            - prompt_deployment_name: typing.Optional[str]. The unique name of the Prompt Deployment. Must provide either this or prompt_deployment_id.

             - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Prompt Deployment

-            - external_id: typing.Optional[str].
+            - external_id: typing.Optional[str]. Optionally include a unique identifier for tracking purposes. Must be unique within a given Prompt Deployment.

-            - expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest].
+            - expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest]. An optionally specified configuration used to opt in to including additional metadata about this prompt execution in the API response. Corresponding values will be returned under the `meta` key of the API response.

-            - raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest].
+            - raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest]. Overrides for the raw API request sent to the model host. Combined with `expand_raw`, it can be used to access new features from models.

-            - expand_raw: typing.Optional[typing.Sequence[str]].
+            - expand_raw: typing.Optional[typing.Sequence[str]]. A list of keys whose values you'd like to directly return from the JSON response of the model provider. Useful if you need lower-level info returned by model providers that Vellum would otherwise omit. Corresponding key/value pairs will be returned under the `raw` key of the API response.

-            - metadata: typing.Optional[typing.Dict[str, typing.Any]].
+            - metadata: typing.Optional[typing.Dict[str, typing.Any]]. Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
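The client.py hunks in this release only expand docstrings; execute_prompt and its streaming/async counterparts keep the same signatures. For orientation, a hedged sketch of a call exercising the newly documented parameters — the deployment name, input variable, and metadata keys are placeholders, and StringInputRequest is assumed to be the string variant of PromptDeploymentInputRequest.

# Sketch only: names and values below are placeholders.
import os

from vellum import StringInputRequest
from vellum.client import Vellum

client = Vellum(api_key=os.environ["VELLUM_API_KEY"])

result = client.execute_prompt(
    inputs=[StringInputRequest(name="question", value="What changed in 0.7.1?")],
    prompt_deployment_name="my-prompt-deployment",  # or prompt_deployment_id=...
    release_tag="LATEST",             # pin to a specific release if desired
    external_id="request-0001",       # must be unique within the Prompt Deployment
    metadata={"user_id": "user-42"},  # arbitrary JSON for later analysis
)
print(result)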
@@ -256,23 +256,23 @@ class Vellum:
         Executes a deployed Prompt and streams back the results.

         Parameters:
-            - inputs: typing.Sequence[PromptDeploymentInputRequest].
+            - inputs: typing.Sequence[PromptDeploymentInputRequest]. A list consisting of the Prompt Deployment's input variables and their values.

             - prompt_deployment_id: typing.Optional[str]. The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.

-            - prompt_deployment_name: typing.Optional[str]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+            - prompt_deployment_name: typing.Optional[str]. The unique name of the Prompt Deployment. Must provide either this or prompt_deployment_id.

             - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Prompt Deployment

-            - external_id: typing.Optional[str].
+            - external_id: typing.Optional[str]. Optionally include a unique identifier for tracking purposes. Must be unique within a given Prompt Deployment.

-            - expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest].
+            - expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest]. An optionally specified configuration used to opt in to including additional metadata about this prompt execution in the API response. Corresponding values will be returned under the `meta` key of the API response.

-            - raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest].
+            - raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest]. Overrides for the raw API request sent to the model host. Combined with `expand_raw`, it can be used to access new features from models.

-            - expand_raw: typing.Optional[typing.Sequence[str]].
+            - expand_raw: typing.Optional[typing.Sequence[str]]. A list of keys whose values you'd like to directly return from the JSON response of the model provider. Useful if you need lower-level info returned by model providers that Vellum would otherwise omit. Corresponding key/value pairs will be returned under the `raw` key of the API response.

-            - metadata: typing.Optional[typing.Dict[str, typing.Any]].
+            - metadata: typing.Optional[typing.Dict[str, typing.Any]]. Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
@@ -1082,23 +1082,23 @@ class AsyncVellum:
         Executes a deployed Prompt and returns the result.

         Parameters:
-            - inputs: typing.Sequence[PromptDeploymentInputRequest].
+            - inputs: typing.Sequence[PromptDeploymentInputRequest]. A list consisting of the Prompt Deployment's input variables and their values.

             - prompt_deployment_id: typing.Optional[str]. The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.

-            - prompt_deployment_name: typing.Optional[str]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+            - prompt_deployment_name: typing.Optional[str]. The unique name of the Prompt Deployment. Must provide either this or prompt_deployment_id.

             - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Prompt Deployment

-            - external_id: typing.Optional[str].
+            - external_id: typing.Optional[str]. Optionally include a unique identifier for tracking purposes. Must be unique within a given Prompt Deployment.

-            - expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest].
+            - expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest]. An optionally specified configuration used to opt in to including additional metadata about this prompt execution in the API response. Corresponding values will be returned under the `meta` key of the API response.

-            - raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest].
+            - raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest]. Overrides for the raw API request sent to the model host. Combined with `expand_raw`, it can be used to access new features from models.

-            - expand_raw: typing.Optional[typing.Sequence[str]].
+            - expand_raw: typing.Optional[typing.Sequence[str]]. A list of keys whose values you'd like to directly return from the JSON response of the model provider. Useful if you need lower-level info returned by model providers that Vellum would otherwise omit. Corresponding key/value pairs will be returned under the `raw` key of the API response.

-            - metadata: typing.Optional[typing.Dict[str, typing.Any]].
+            - metadata: typing.Optional[typing.Dict[str, typing.Any]]. Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.

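The async client's version of the method takes the same arguments. A hypothetical sketch of calling it with an idempotency-style `external_id` and request metadata (all values are placeholders):

    import asyncio

    from vellum.client import AsyncVellum

    client = AsyncVellum(api_key="YOUR_API_KEY")

    async def main() -> None:
        response = await client.execute_prompt(
            inputs=[],  # populate with PromptDeploymentInputRequest values
            prompt_deployment_name="my-deployment",
            external_id="order-42",  # unique tracking id within this Prompt Deployment
            metadata={"user_id": "user-123"},  # arbitrary JSON kept for later analysis
        )
        print(response)

    asyncio.run(main())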
---
@@ -1217,23 +1217,23 @@ class AsyncVellum:
         Executes a deployed Prompt and streams back the results.

         Parameters:
-            - inputs: typing.Sequence[PromptDeploymentInputRequest].
+            - inputs: typing.Sequence[PromptDeploymentInputRequest]. A list consisting of the Prompt Deployment's input variables and their values.

             - prompt_deployment_id: typing.Optional[str]. The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.

-            - prompt_deployment_name: typing.Optional[str]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+            - prompt_deployment_name: typing.Optional[str]. The unique name of the Prompt Deployment. Must provide either this or prompt_deployment_id.

             - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Prompt Deployment

-            - external_id: typing.Optional[str].
+            - external_id: typing.Optional[str]. Optionally include a unique identifier for tracking purposes. Must be unique within a given Prompt Deployment.

-            - expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest].
+            - expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest]. An optionally specified configuration used to opt in to including additional metadata about this prompt execution in the API response. Corresponding values will be returned under the `meta` key of the API response.

-            - raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest].
+            - raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest]. Overrides for the raw API request sent to the model host. Combined with `expand_raw`, it can be used to access new features from models.

-            - expand_raw: typing.Optional[typing.Sequence[str]].
+            - expand_raw: typing.Optional[typing.Sequence[str]]. A list of keys whose values you'd like to directly return from the JSON response of the model provider. Useful if you need lower-level info returned by model providers that Vellum would otherwise omit. Corresponding key/value pairs will be returned under the `raw` key of the API response.

-            - metadata: typing.Optional[typing.Dict[str, typing.Any]].
+            - metadata: typing.Optional[typing.Dict[str, typing.Any]]. Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.

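For the streaming variant, a hypothetical sketch of consuming the results; this assumes the async method can be iterated with `async for` and that each event exposes a `state` field, and the deployment name and `expand_raw` keys are placeholders.

    import asyncio

    from vellum.client import AsyncVellum

    client = AsyncVellum(api_key="YOUR_API_KEY")

    async def stream_prompt() -> None:
        async for event in client.execute_prompt_stream(
            inputs=[],  # populate with PromptDeploymentInputRequest values
            prompt_deployment_name="my-deployment",
            expand_raw=["usage"],  # provider fields surfaced under the `raw` key
        ):
            print(event.state)  # e.g. INITIATED / STREAMING / FULFILLED / REJECTED

    asyncio.run(stream_prompt())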
---
@@ -3,7 +3,7 @@ from __future__ import annotations
 import logging
 import time
 from functools import cached_property
-from typing import Callable, Generator, List,
+from typing import Callable, Generator, List, cast, Iterable

 from vellum import TestSuiteRunRead, TestSuiteRunMetricOutput_Number
 from vellum.client import Vellum
@@ -174,7 +174,7 @@ class VellumTestSuiteRunResults:
         self,
         metric_identifier: str | None = None,
         output_identifier: str | None = None,
-    ) -> List[float]:
+    ) -> List[float | None]:
         """Returns the values of a numeric metric output that match the given criteria."""

         metric_outputs: list[TestSuiteRunMetricOutput_Number] = []
@@ -198,7 +198,7 @@ class VellumTestSuiteRunResults:
         output_values = self.get_numeric_metric_output_values(
             metric_identifier=metric_identifier, output_identifier=output_identifier
         )
-        return sum(output_values) / len(output_values)
+        return sum(cast(Iterable[float], filter(lambda o: isinstance(o, float), output_values))) / len(output_values)

     def get_min_metric_output(
         self, metric_identifier: str | None = None, output_identifier: str | None = None
@@ -207,7 +207,7 @@ class VellumTestSuiteRunResults:
         output_values = self.get_numeric_metric_output_values(
             metric_identifier=metric_identifier, output_identifier=output_identifier
         )
-        return min(output_values)
+        return min(cast(Iterable[float], filter(lambda o: isinstance(o, float), output_values)))

     def get_max_metric_output(
         self, metric_identifier: str | None = None, output_identifier: str | None = None
@@ -216,7 +216,7 @@ class VellumTestSuiteRunResults:
         output_values = self.get_numeric_metric_output_values(
             metric_identifier=metric_identifier, output_identifier=output_identifier
         )
-        return max(output_values)
+        return max(cast(Iterable[float], filter(lambda o: isinstance(o, float), output_values)))

     def wait_until_complete(self) -> None:
         """Wait until the Test Suite Run is no longer in a QUEUED or RUNNING state."""
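The aggregation helpers above now receive optional values and drop the non-numeric entries before aggregating. A small, self-contained sketch of that same filtering pattern with made-up values; note that the mean, as in the updated code, still divides by the total number of outputs, including any `None` entries.

    from typing import Iterable, List, Optional, cast

    # Illustrative values only: some metric outputs may be missing (None).
    output_values: List[Optional[float]] = [0.5, None, 1.0, 0.25]

    # Keep only real floats, with cast() to satisfy the type checker.
    numeric = list(cast(Iterable[float], filter(lambda o: isinstance(o, float), output_values)))

    print(min(numeric))                       # 0.25
    print(max(numeric))                       # 1.0
    print(sum(numeric) / len(output_values))  # 0.4375 -- divides by all outputs, None included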
@@ -458,6 +458,63 @@ class DocumentIndexesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    def add_document(
+        self, document_id: str, id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> None:
+        """
+        Adds a previously uploaded Document to the specified Document Index.
+
+        Parameters:
+            - document_id: str. Either the Vellum-generated ID or the originally supplied external_id that uniquely identifies the Document you'd like to add.
+
+            - id: str. Either the Vellum-generated ID or the originally specified name that uniquely identifies the Document Index to which you'd like to add the Document.
+
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+        ---
+        from vellum.client import Vellum
+
+        client = Vellum(
+            api_key="YOUR_API_KEY",
+        )
+        client.document_indexes.add_document(
+            document_id="document_id",
+            id="id",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            method="POST",
+            url=urllib.parse.urljoin(
+                f"{self._client_wrapper.get_environment().default}/",
+                f"v1/document-indexes/{jsonable_encoder(id)}/documents/{jsonable_encoder(document_id)}",
+            ),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            json=jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))
+            if request_options is not None
+            else None,
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else self._client_wrapper.get_timeout(),
+            retries=0,
+            max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
+        )
+        if 200 <= _response.status_code < 300:
+            return
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def remove_document(
         self, document_id: str, id: str, *, request_options: typing.Optional[RequestOptions] = None
     ) -> None:
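A hypothetical usage sketch for the new endpoint with basic error handling; the index name and document id are placeholders, and `ApiError` is assumed to be importable from `vellum.core.api_error`, matching the exception raised above.

    from vellum.client import Vellum
    from vellum.core.api_error import ApiError

    client = Vellum(api_key="YOUR_API_KEY")

    try:
        # Attach an already-uploaded Document to a Document Index by name or ID.
        client.document_indexes.add_document(
            document_id="my-external-document-id",
            id="my-document-index",
        )
    except ApiError as exc:
        # Non-2xx responses are surfaced as ApiError with the response body attached.
        print(f"Failed to add document: {exc.status_code} {exc.body}")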
@@ -950,6 +1007,63 @@ class AsyncDocumentIndexesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    async def add_document(
+        self, document_id: str, id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> None:
+        """
+        Adds a previously uploaded Document to the specified Document Index.
+
+        Parameters:
+            - document_id: str. Either the Vellum-generated ID or the originally supplied external_id that uniquely identifies the Document you'd like to add.
+
+            - id: str. Either the Vellum-generated ID or the originally specified name that uniquely identifies the Document Index to which you'd like to add the Document.
+
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+        ---
+        from vellum.client import AsyncVellum
+
+        client = AsyncVellum(
+            api_key="YOUR_API_KEY",
+        )
+        await client.document_indexes.add_document(
+            document_id="document_id",
+            id="id",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            method="POST",
+            url=urllib.parse.urljoin(
+                f"{self._client_wrapper.get_environment().default}/",
+                f"v1/document-indexes/{jsonable_encoder(id)}/documents/{jsonable_encoder(document_id)}",
+            ),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            json=jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))
+            if request_options is not None
+            else None,
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else self._client_wrapper.get_timeout(),
+            retries=0,
+            max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
+        )
+        if 200 <= _response.status_code < 300:
+            return
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def remove_document(
         self, document_id: str, id: str, *, request_options: typing.Optional[RequestOptions] = None
     ) -> None:
@@ -11,11 +11,11 @@ from ...core.jsonable_encoder import jsonable_encoder
 from ...core.pydantic_utilities import pydantic_v1
 from ...core.remove_none_from_dict import remove_none_from_dict
 from ...core.request_options import RequestOptions
-from ...types.named_test_case_variable_value_request import NamedTestCaseVariableValueRequest
 from ...types.paginated_test_suite_test_case_list import PaginatedTestSuiteTestCaseList
 from ...types.test_suite_test_case import TestSuiteTestCase
 from ...types.test_suite_test_case_bulk_operation_request import TestSuiteTestCaseBulkOperationRequest
 from ...types.test_suite_test_case_bulk_result import TestSuiteTestCaseBulkResult
+from ...types.upsert_test_suite_test_case_request import UpsertTestSuiteTestCaseRequest

 # this is used as the default value for optional parameters
 OMIT = typing.cast(typing.Any, ...)
@@ -99,11 +99,7 @@ class TestSuitesClient:
         self,
         id: str,
         *,
-        upsert_test_suite_test_case_request_id: typing.Optional[str] = OMIT,
-        external_id: typing.Optional[str] = OMIT,
-        label: typing.Optional[str] = OMIT,
-        input_values: typing.Sequence[NamedTestCaseVariableValueRequest],
-        evaluation_values: typing.Sequence[NamedTestCaseVariableValueRequest],
+        request: UpsertTestSuiteTestCaseRequest,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> TestSuiteTestCase:
         """
@@ -118,18 +114,11 @@ class TestSuitesClient:
         Parameters:
             - id: str. A UUID string identifying this test suite.

-            -
-
-            - external_id: typing.Optional[str]. An ID external to Vellum that uniquely identifies the Test Case that you'd like to create/update. If there's a match on a Test Case that was previously created with the same external_id, it will be updated. Otherwise, a new Test Case will be created with this value as its external_id. If no external_id is specified, then a new Test Case will always be created.
-
-            - label: typing.Optional[str]. A human-readable label used to convey the intention of this Test Case
-
-            - input_values: typing.Sequence[NamedTestCaseVariableValueRequest]. Values for each of the Test Case's input variables
-
-            - evaluation_values: typing.Sequence[NamedTestCaseVariableValueRequest]. Values for each of the Test Case's evaluation variables
+            - request: UpsertTestSuiteTestCaseRequest.

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
+        from vellum import UpsertTestSuiteTestCaseRequest
         from vellum.client import Vellum

         client = Vellum(
@@ -137,17 +126,12 @@ class TestSuitesClient:
         )
         client.test_suites.upsert_test_suite_test_case(
             id="id",
-
-
+            request=UpsertTestSuiteTestCaseRequest(
+                input_values=[],
+                evaluation_values=[],
+            ),
         )
         """
-        _request: typing.Dict[str, typing.Any] = {"input_values": input_values, "evaluation_values": evaluation_values}
-        if upsert_test_suite_test_case_request_id is not OMIT:
-            _request["id"] = upsert_test_suite_test_case_request_id
-        if external_id is not OMIT:
-            _request["external_id"] = external_id
-        if label is not OMIT:
-            _request["label"] = label
         _response = self._client_wrapper.httpx_client.request(
             method="POST",
             url=urllib.parse.urljoin(
@@ -157,10 +141,10 @@ class TestSuitesClient:
             params=jsonable_encoder(
                 request_options.get("additional_query_parameters") if request_options is not None else None
             ),
-            json=jsonable_encoder(_request)
+            json=jsonable_encoder(request)
             if request_options is None or request_options.get("additional_body_parameters") is None
             else {
-                **jsonable_encoder(_request),
+                **jsonable_encoder(request),
                 **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
             },
             headers=jsonable_encoder(
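Since the flattened keyword arguments were folded into a single request object, existing callers need a small migration. A hedged before/after sketch: it assumes UpsertTestSuiteTestCaseRequest carries the previously flattened fields (external_id, label, input_values, evaluation_values), the values shown are placeholders, and the empty lists would normally hold NamedTestCaseVariableValueRequest values for your suite.

    from vellum import UpsertTestSuiteTestCaseRequest
    from vellum.client import Vellum

    client = Vellum(api_key="YOUR_API_KEY")

    # Before: flattened keyword arguments.
    # client.test_suites.upsert_test_suite_test_case(
    #     id="test-suite-id",
    #     external_id="case-1",
    #     label="My test case",
    #     input_values=[],
    #     evaluation_values=[],
    # )

    # After: the same fields move onto the request object.
    client.test_suites.upsert_test_suite_test_case(
        id="test-suite-id",
        request=UpsertTestSuiteTestCaseRequest(
            external_id="case-1",
            label="My test case",
            input_values=[],
            evaluation_values=[],
        ),
    )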
@@ -402,11 +386,7 @@ class AsyncTestSuitesClient:
         self,
         id: str,
         *,
-        upsert_test_suite_test_case_request_id: typing.Optional[str] = OMIT,
-        external_id: typing.Optional[str] = OMIT,
-        label: typing.Optional[str] = OMIT,
-        input_values: typing.Sequence[NamedTestCaseVariableValueRequest],
-        evaluation_values: typing.Sequence[NamedTestCaseVariableValueRequest],
+        request: UpsertTestSuiteTestCaseRequest,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> TestSuiteTestCase:
         """
@@ -421,18 +401,11 @@ class AsyncTestSuitesClient:
         Parameters:
             - id: str. A UUID string identifying this test suite.

-            -
-
-            - external_id: typing.Optional[str]. An ID external to Vellum that uniquely identifies the Test Case that you'd like to create/update. If there's a match on a Test Case that was previously created with the same external_id, it will be updated. Otherwise, a new Test Case will be created with this value as its external_id. If no external_id is specified, then a new Test Case will always be created.
-
-            - label: typing.Optional[str]. A human-readable label used to convey the intention of this Test Case
-
-            - input_values: typing.Sequence[NamedTestCaseVariableValueRequest]. Values for each of the Test Case's input variables
-
-            - evaluation_values: typing.Sequence[NamedTestCaseVariableValueRequest]. Values for each of the Test Case's evaluation variables
+            - request: UpsertTestSuiteTestCaseRequest.

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
+        from vellum import UpsertTestSuiteTestCaseRequest
         from vellum.client import AsyncVellum

         client = AsyncVellum(
@@ -440,17 +413,12 @@ class AsyncTestSuitesClient:
         )
         await client.test_suites.upsert_test_suite_test_case(
             id="id",
-
-
+            request=UpsertTestSuiteTestCaseRequest(
+                input_values=[],
+                evaluation_values=[],
+            ),
         )
         """
-        _request: typing.Dict[str, typing.Any] = {"input_values": input_values, "evaluation_values": evaluation_values}
-        if upsert_test_suite_test_case_request_id is not OMIT:
-            _request["id"] = upsert_test_suite_test_case_request_id
-        if external_id is not OMIT:
-            _request["external_id"] = external_id
-        if label is not OMIT:
-            _request["label"] = label
         _response = await self._client_wrapper.httpx_client.request(
             method="POST",
             url=urllib.parse.urljoin(
@@ -460,10 +428,10 @@ class AsyncTestSuitesClient:
             params=jsonable_encoder(
                 request_options.get("additional_query_parameters") if request_options is not None else None
             ),
-            json=jsonable_encoder(_request)
+            json=jsonable_encoder(request)
             if request_options is None or request_options.get("additional_body_parameters") is None
             else {
-                **jsonable_encoder(_request),
+                **jsonable_encoder(request),
                 **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
             },
             headers=jsonable_encoder(