vellum-ai 0.3.11__tar.gz → 0.3.12__tar.gz
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/PKG-INFO +1 -1
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/pyproject.toml +1 -1
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/__init__.py +4 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/core/client_wrapper.py +1 -1
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/document_indexes/client.py +282 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/__init__.py +4 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/array_variable_value_item.py +11 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/fulfilled_workflow_node_result_event.py +2 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/generate_request.py +1 -1
- vellum_ai-0.3.12/src/vellum/types/image_variable_value.py +33 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/initiated_workflow_node_result_event.py +1 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/rejected_workflow_node_result_event.py +1 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/streaming_workflow_node_result_event.py +1 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_execution_event_error_code.py +5 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/LICENSE +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/README.md +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/client.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/core/__init__.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/core/api_error.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/core/datetime_utils.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/core/jsonable_encoder.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/core/remove_none_from_dict.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/environment.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/errors/__init__.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/errors/bad_request_error.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/errors/conflict_error.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/errors/forbidden_error.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/errors/internal_server_error.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/errors/not_found_error.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/py.typed +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/__init__.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/deployments/__init__.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/deployments/client.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/deployments/types/__init__.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/deployments/types/deployments_list_request_status.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/document_indexes/__init__.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/document_indexes/types/__init__.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/document_indexes/types/document_indexes_list_request_status.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/documents/__init__.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/documents/client.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/folder_entities/__init__.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/folder_entities/client.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/model_versions/__init__.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/model_versions/client.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/registered_prompts/__init__.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/registered_prompts/client.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/sandboxes/__init__.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/sandboxes/client.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/test_suites/__init__.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/test_suites/client.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/workflow_deployments/__init__.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/workflow_deployments/client.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/workflow_deployments/types/__init__.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/workflow_deployments/types/workflow_deployments_list_request_status.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/api_node_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/api_node_result_data.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/array_chat_message_content.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/array_chat_message_content_item.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/array_chat_message_content_item_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/array_chat_message_content_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/array_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/block_type_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/chat_history_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/chat_history_input_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/chat_history_variable_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/chat_message.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/chat_message_content.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/chat_message_content_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/chat_message_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/chat_message_role.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/code_execution_node_chat_history_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/code_execution_node_error_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/code_execution_node_json_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/code_execution_node_number_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/code_execution_node_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/code_execution_node_result_data.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/code_execution_node_result_output.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/code_execution_node_search_results_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/code_execution_node_string_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/conditional_node_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/conditional_node_result_data.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/deployment_provider_payload_response.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/deployment_read.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/document_document_to_document_index.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/document_index_read.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/document_read.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/document_status.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/enriched_normalized_completion.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/entity_status.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/environment_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/error_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/error_variable_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/execute_prompt_api_error_response.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/execute_prompt_event.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/execute_prompt_response.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/execute_workflow_error_response.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/execute_workflow_response.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/execute_workflow_stream_error_response.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/execute_workflow_workflow_result_event.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/execution_array_vellum_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/execution_chat_history_vellum_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/execution_error_vellum_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/execution_function_call_vellum_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/execution_json_vellum_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/execution_number_vellum_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/execution_search_results_vellum_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/execution_string_vellum_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/execution_vellum_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/finish_reason_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/fulfilled_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/fulfilled_execute_prompt_event.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/fulfilled_execute_prompt_response.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/fulfilled_execute_workflow_workflow_result_event.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/fulfilled_function_call.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/fulfilled_prompt_execution_meta.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/function_call.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/function_call_chat_message_content.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/function_call_chat_message_content_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/function_call_chat_message_content_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/function_call_chat_message_content_value_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/function_call_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/function_call_variable_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/generate_error_response.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/generate_options_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/generate_response.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/generate_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/generate_result_data.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/generate_result_error.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/generate_stream_response.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/generate_stream_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/generate_stream_result_data.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/image_chat_message_content.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/image_chat_message_content_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/image_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/indexing_state_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/initiated_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/initiated_execute_prompt_event.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/initiated_prompt_execution_meta.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/json_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/json_input_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/json_variable_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/logical_operator.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/logprobs_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/metadata_filter_config_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/metadata_filter_rule_combinator.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/metadata_filter_rule_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/model_version_build_config.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/model_version_exec_config.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/model_version_exec_config_parameters.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/model_version_read.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/model_version_read_status_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/model_version_sandbox_snapshot.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/named_test_case_chat_history_variable_value_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/named_test_case_error_variable_value_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/named_test_case_json_variable_value_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/named_test_case_number_variable_value_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/named_test_case_search_results_variable_value_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/named_test_case_string_variable_value_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/named_test_case_variable_value_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/node_input_compiled_array_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/node_input_compiled_chat_history_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/node_input_compiled_error_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/node_input_compiled_json_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/node_input_compiled_number_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/node_input_compiled_search_results_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/node_input_compiled_string_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/node_input_variable_compiled_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/node_output_compiled_array_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/node_output_compiled_chat_history_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/node_output_compiled_error_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/node_output_compiled_function_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/node_output_compiled_json_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/node_output_compiled_number_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/node_output_compiled_search_results_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/node_output_compiled_string_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/node_output_compiled_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/normalized_log_probs.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/normalized_token_log_probs.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/number_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/number_variable_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/paginated_document_index_read_list.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/paginated_slim_deployment_read_list.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/paginated_slim_document_list.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/paginated_slim_workflow_deployment_list.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/processing_failure_reason_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/processing_state_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/prompt_deployment_expand_meta_request_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/prompt_deployment_input_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/prompt_execution_meta.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/prompt_node_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/prompt_node_result_data.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/prompt_output.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/prompt_template_block.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/prompt_template_block_data.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/prompt_template_block_data_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/prompt_template_block_properties.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/prompt_template_block_properties_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/prompt_template_block_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/provider_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/raw_prompt_execution_overrides_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/register_prompt_error_response.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/register_prompt_model_parameters_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/register_prompt_prompt.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/register_prompt_prompt_info_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/register_prompt_response.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/registered_prompt_deployment.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/registered_prompt_input_variable_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/registered_prompt_model_version.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/registered_prompt_sandbox.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/registered_prompt_sandbox_snapshot.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/rejected_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/rejected_execute_prompt_event.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/rejected_execute_prompt_response.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/rejected_execute_workflow_workflow_result_event.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/rejected_function_call.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/rejected_prompt_execution_meta.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/sandbox_scenario.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/scenario_input.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/scenario_input_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/scenario_input_type_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/search_error_response.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/search_filters_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/search_node_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/search_node_result_data.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/search_request_options_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/search_response.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/search_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/search_result_document.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/search_result_document_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/search_result_merging_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/search_result_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/search_results_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/search_results_variable_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/search_weights_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/slim_deployment_read.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/slim_document.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/slim_workflow_deployment.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/streaming_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/streaming_execute_prompt_event.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/streaming_prompt_execution_meta.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/string_chat_message_content.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/string_chat_message_content_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/string_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/string_input_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/string_variable_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/submit_completion_actual_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/submit_completion_actuals_error_response.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/submit_workflow_execution_actual_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/subworkflow_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/subworkflow_node_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/templating_node_chat_history_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/templating_node_error_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/templating_node_json_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/templating_node_number_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/templating_node_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/templating_node_result_data.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/templating_node_result_output.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/templating_node_search_results_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/templating_node_string_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_array_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_chat_history_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_error_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_function_call_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_json_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_number_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_result_data.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_result_output.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_search_results_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_string_result.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/test_case_chat_history_variable_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/test_case_error_variable_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/test_case_json_variable_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/test_case_number_variable_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/test_case_search_results_variable_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/test_case_string_variable_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/test_case_variable_value.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/test_suite_test_case.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/upload_document_error_response.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/upload_document_response.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/vellum_error.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/vellum_error_code_enum.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/vellum_error_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/vellum_image.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/vellum_image_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/vellum_variable.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/vellum_variable_type.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_deployment_read.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_event_error.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_execution_actual_chat_history_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_execution_actual_json_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_execution_actual_string_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_execution_event_type.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_execution_node_result_event.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_execution_workflow_result_event.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_node_result_data.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_node_result_event.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_node_result_event_state.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_output.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_output_array.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_output_chat_history.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_output_error.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_output_function_call.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_output_image.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_output_json.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_output_number.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_output_search_results.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_output_string.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_request_chat_history_input_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_request_input_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_request_json_input_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_request_number_input_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_request_string_input_request.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_result_event.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_result_event_output_data.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_result_event_output_data_array.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_result_event_output_data_chat_history.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_result_event_output_data_error.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_result_event_output_data_function_call.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_result_event_output_data_json.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_result_event_output_data_number.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_result_event_output_data_search_results.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_result_event_output_data_string.py +0 -0
- {vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_stream_event.py +0 -0
{vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/__init__.py

@@ -18,6 +18,7 @@ from .types import (
     ArrayVariableValueItem_ChatHistory,
     ArrayVariableValueItem_Error,
     ArrayVariableValueItem_FunctionCall,
+    ArrayVariableValueItem_Image,
     ArrayVariableValueItem_Json,
     ArrayVariableValueItem_Number,
     ArrayVariableValueItem_SearchResults,
@@ -129,6 +130,7 @@ from .types import (
     ImageChatMessageContent,
     ImageChatMessageContentRequest,
     ImageEnum,
+    ImageVariableValue,
     IndexingStateEnum,
     InitiatedEnum,
     InitiatedExecutePromptEvent,
@@ -445,6 +447,7 @@ __all__ = [
     "ArrayVariableValueItem_ChatHistory",
     "ArrayVariableValueItem_Error",
     "ArrayVariableValueItem_FunctionCall",
+    "ArrayVariableValueItem_Image",
     "ArrayVariableValueItem_Json",
     "ArrayVariableValueItem_Number",
     "ArrayVariableValueItem_SearchResults",
@@ -561,6 +564,7 @@ __all__ = [
    "ImageChatMessageContent",
    "ImageChatMessageContentRequest",
    "ImageEnum",
+    "ImageVariableValue",
    "IndexingStateEnum",
    "InitiatedEnum",
    "InitiatedExecutePromptEvent",
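With these re-exports in place, the new image types can be imported straight from the package root. A minimal sketch using only the names added above:

    from vellum import ArrayVariableValueItem_Image, ImageVariableValue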
{vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/core/client_wrapper.py

@@ -16,7 +16,7 @@ class BaseClientWrapper:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "vellum-ai",
-            "X-Fern-SDK-Version": "0.3.11",
+            "X-Fern-SDK-Version": "0.3.12",
         }
         headers["X_API_KEY"] = self.api_key
         return headers
{vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/resources/document_indexes/client.py

@@ -187,6 +187,147 @@ class DocumentIndexesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def update(
+        self,
+        id: str,
+        *,
+        label: str,
+        status: typing.Optional[EntityStatus] = OMIT,
+        environment: typing.Optional[EnvironmentEnum] = OMIT,
+    ) -> DocumentIndexRead:
+        """
+        Used to fully update a Document Index given its ID.
+
+        Parameters:
+            - id: str. A UUID string identifying this document index.
+
+            - label: str. A human-readable label for the document index
+
+            - status: typing.Optional[EntityStatus]. The current status of the document index
+
+                      * `ACTIVE` - Active
+                      * `ARCHIVED` - Archived
+            - environment: typing.Optional[EnvironmentEnum]. The environment this document index is used in
+
+                           * `DEVELOPMENT` - Development
+                           * `STAGING` - Staging
+                           * `PRODUCTION` - Production---
+        from vellum.client import Vellum
+
+        client = Vellum(
+            api_key="YOUR_API_KEY",
+        )
+        client.document_indexes.update(
+            id="id",
+            label="label",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {"label": label}
+        if status is not OMIT:
+            _request["status"] = status
+        if environment is not OMIT:
+            _request["environment"] = environment
+        _response = self._client_wrapper.httpx_client.request(
+            "PUT",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", f"v1/document-indexes/{id}"),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=None,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(DocumentIndexRead, _response.json())  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def destroy(self, id: str) -> None:
+        """
+        Used to delete a Document Index given its ID.
+
+        Parameters:
+            - id: str. A UUID string identifying this document index.
+        ---
+        from vellum.client import Vellum
+
+        client = Vellum(
+            api_key="YOUR_API_KEY",
+        )
+        client.document_indexes.destroy(
+            id="id",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "DELETE",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", f"v1/document-indexes/{id}"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=None,
+        )
+        if 200 <= _response.status_code < 300:
+            return
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def partial_update(
+        self,
+        id: str,
+        *,
+        label: typing.Optional[str] = OMIT,
+        status: typing.Optional[EntityStatus] = OMIT,
+        environment: typing.Optional[EnvironmentEnum] = OMIT,
+    ) -> DocumentIndexRead:
+        """
+        Used to partial update a Document Index given its ID.
+
+        Parameters:
+            - id: str. A UUID string identifying this document index.
+
+            - label: typing.Optional[str]. A human-readable label for the document index
+
+            - status: typing.Optional[EntityStatus]. The current status of the document index
+
+                      * `ACTIVE` - Active
+                      * `ARCHIVED` - Archived
+            - environment: typing.Optional[EnvironmentEnum]. The environment this document index is used in
+
+                           * `DEVELOPMENT` - Development
+                           * `STAGING` - Staging
+                           * `PRODUCTION` - Production---
+        from vellum.client import Vellum
+
+        client = Vellum(
+            api_key="YOUR_API_KEY",
+        )
+        client.document_indexes.partial_update(
+            id="id",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {}
+        if label is not OMIT:
+            _request["label"] = label
+        if status is not OMIT:
+            _request["status"] = status
+        if environment is not OMIT:
+            _request["environment"] = environment
+        _response = self._client_wrapper.httpx_client.request(
+            "PATCH",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", f"v1/document-indexes/{id}"),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=None,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(DocumentIndexRead, _response.json())  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
 
 class AsyncDocumentIndexesClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -351,3 +492,144 @@ class AsyncDocumentIndexesClient:
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def update(
+        self,
+        id: str,
+        *,
+        label: str,
+        status: typing.Optional[EntityStatus] = OMIT,
+        environment: typing.Optional[EnvironmentEnum] = OMIT,
+    ) -> DocumentIndexRead:
+        """
+        Used to fully update a Document Index given its ID.
+
+        Parameters:
+            - id: str. A UUID string identifying this document index.
+
+            - label: str. A human-readable label for the document index
+
+            - status: typing.Optional[EntityStatus]. The current status of the document index
+
+                      * `ACTIVE` - Active
+                      * `ARCHIVED` - Archived
+            - environment: typing.Optional[EnvironmentEnum]. The environment this document index is used in
+
+                           * `DEVELOPMENT` - Development
+                           * `STAGING` - Staging
+                           * `PRODUCTION` - Production---
+        from vellum.client import AsyncVellum
+
+        client = AsyncVellum(
+            api_key="YOUR_API_KEY",
+        )
+        await client.document_indexes.update(
+            id="id",
+            label="label",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {"label": label}
+        if status is not OMIT:
+            _request["status"] = status
+        if environment is not OMIT:
+            _request["environment"] = environment
+        _response = await self._client_wrapper.httpx_client.request(
+            "PUT",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", f"v1/document-indexes/{id}"),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=None,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(DocumentIndexRead, _response.json())  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def destroy(self, id: str) -> None:
+        """
+        Used to delete a Document Index given its ID.
+
+        Parameters:
+            - id: str. A UUID string identifying this document index.
+        ---
+        from vellum.client import AsyncVellum
+
+        client = AsyncVellum(
+            api_key="YOUR_API_KEY",
+        )
+        await client.document_indexes.destroy(
+            id="id",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "DELETE",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", f"v1/document-indexes/{id}"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=None,
+        )
+        if 200 <= _response.status_code < 300:
+            return
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def partial_update(
+        self,
+        id: str,
+        *,
+        label: typing.Optional[str] = OMIT,
+        status: typing.Optional[EntityStatus] = OMIT,
+        environment: typing.Optional[EnvironmentEnum] = OMIT,
+    ) -> DocumentIndexRead:
+        """
+        Used to partial update a Document Index given its ID.
+
+        Parameters:
+            - id: str. A UUID string identifying this document index.
+
+            - label: typing.Optional[str]. A human-readable label for the document index
+
+            - status: typing.Optional[EntityStatus]. The current status of the document index
+
+                      * `ACTIVE` - Active
+                      * `ARCHIVED` - Archived
+            - environment: typing.Optional[EnvironmentEnum]. The environment this document index is used in
+
+                           * `DEVELOPMENT` - Development
+                           * `STAGING` - Staging
+                           * `PRODUCTION` - Production---
+        from vellum.client import AsyncVellum
+
+        client = AsyncVellum(
+            api_key="YOUR_API_KEY",
+        )
+        await client.document_indexes.partial_update(
+            id="id",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {}
+        if label is not OMIT:
+            _request["label"] = label
+        if status is not OMIT:
+            _request["status"] = status
+        if environment is not OMIT:
+            _request["environment"] = environment
+        _response = await self._client_wrapper.httpx_client.request(
+            "PATCH",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", f"v1/document-indexes/{id}"),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=None,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(DocumentIndexRead, _response.json())  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
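The hunks above add full-update (PUT), partial-update (PATCH), and delete (DELETE) operations for document indexes to both the sync and async clients. A minimal usage sketch of the synchronous variants, assuming a placeholder index UUID and that `EntityStatus`/`EnvironmentEnum` values are passed as their string literals ("ACTIVE", "STAGING", etc.):

    from vellum.client import Vellum

    client = Vellum(api_key="YOUR_API_KEY")
    index_id = "00000000-0000-0000-0000-000000000000"  # placeholder UUID, not a real index

    # Full update: `label` is required; `status` and `environment` are optional.
    client.document_indexes.update(id=index_id, label="Support KB", status="ACTIVE")

    # Partial update: every field is optional; only the keys you pass are sent.
    client.document_indexes.partial_update(id=index_id, environment="STAGING")

    # Delete the index entirely.
    client.document_indexes.destroy(id=index_id)

The async client exposes the same three methods as awaitables on `AsyncVellum`.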
{vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/__init__.py

@@ -22,6 +22,7 @@ from .array_variable_value_item import (
     ArrayVariableValueItem_ChatHistory,
     ArrayVariableValueItem_Error,
     ArrayVariableValueItem_FunctionCall,
+    ArrayVariableValueItem_Image,
     ArrayVariableValueItem_Json,
     ArrayVariableValueItem_Number,
     ArrayVariableValueItem_SearchResults,
@@ -146,6 +147,7 @@ from .generate_stream_result_data import GenerateStreamResultData
 from .image_chat_message_content import ImageChatMessageContent
 from .image_chat_message_content_request import ImageChatMessageContentRequest
 from .image_enum import ImageEnum
+from .image_variable_value import ImageVariableValue
 from .indexing_state_enum import IndexingStateEnum
 from .initiated_enum import InitiatedEnum
 from .initiated_execute_prompt_event import InitiatedExecutePromptEvent
@@ -471,6 +473,7 @@ __all__ = [
     "ArrayVariableValueItem_ChatHistory",
     "ArrayVariableValueItem_Error",
     "ArrayVariableValueItem_FunctionCall",
+    "ArrayVariableValueItem_Image",
     "ArrayVariableValueItem_Json",
     "ArrayVariableValueItem_Number",
     "ArrayVariableValueItem_SearchResults",
@@ -582,6 +585,7 @@ __all__ = [
     "ImageChatMessageContent",
     "ImageChatMessageContentRequest",
     "ImageEnum",
+    "ImageVariableValue",
     "IndexingStateEnum",
     "InitiatedEnum",
     "InitiatedExecutePromptEvent",
{vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/array_variable_value_item.py

@@ -9,6 +9,7 @@ import typing_extensions
 from .chat_history_variable_value import ChatHistoryVariableValue
 from .error_variable_value import ErrorVariableValue
 from .function_call_variable_value import FunctionCallVariableValue
+from .image_variable_value import ImageVariableValue
 from .json_variable_value import JsonVariableValue
 from .number_variable_value import NumberVariableValue
 from .search_results_variable_value import SearchResultsVariableValue
@@ -78,6 +79,15 @@ class ArrayVariableValueItem_FunctionCall(FunctionCallVariableValue):
         allow_population_by_field_name = True
 
 
+class ArrayVariableValueItem_Image(ImageVariableValue):
+    type: typing_extensions.Literal["IMAGE"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
 ArrayVariableValueItem = typing.Union[
     ArrayVariableValueItem_String,
     ArrayVariableValueItem_Number,
@@ -86,4 +96,5 @@ ArrayVariableValueItem = typing.Union[
     ArrayVariableValueItem_SearchResults,
     ArrayVariableValueItem_Error,
     ArrayVariableValueItem_FunctionCall,
+    ArrayVariableValueItem_Image,
 ]
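`ArrayVariableValueItem` is a union discriminated on the `type` literal, so a payload tagged "IMAGE" now deserializes into the new variant. A rough sketch, assuming `VellumImage` accepts a required `src` URL (see src/vellum/types/vellum_image.py for its actual fields):

    # Mirror the SDK's own pydantic import shim so the v1 API is used.
    try:
        import pydantic.v1 as pydantic  # type: ignore
    except ImportError:
        import pydantic  # type: ignore

    from vellum.types.array_variable_value_item import ArrayVariableValueItem

    raw = {"type": "IMAGE", "value": {"src": "https://example.com/diagram.png"}}
    item = pydantic.parse_obj_as(ArrayVariableValueItem, raw)  # type: ignore
    assert type(item).__name__ == "ArrayVariableValueItem_Image"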
{vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/fulfilled_workflow_node_result_event.py

@@ -23,7 +23,9 @@ class FulfilledWorkflowNodeResultEvent(pydantic.BaseModel):
     node_result_id: str
     ts: typing.Optional[dt.datetime]
     data: typing.Optional[WorkflowNodeResultData]
+    source_execution_id: typing.Optional[str]
     output_values: typing.Optional[typing.List[NodeOutputCompiledValue]]
+    mocked: typing.Optional[bool]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
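This event, like the initiated/rejected/streaming node result events further below, gains a `source_execution_id`, and the fulfilled event additionally gains a `mocked` flag. A small sketch of reading the new fields off a fulfilled event; the event object is assumed to come from an execute-workflow stream rather than being constructed here:

    from vellum.types.fulfilled_workflow_node_result_event import FulfilledWorkflowNodeResultEvent

    def describe(event: FulfilledWorkflowNodeResultEvent) -> str:
        parts = [f"node_result_id={event.node_result_id}"]
        if event.source_execution_id is not None:
            parts.append(f"source_execution_id={event.source_execution_id}")
        if event.mocked:
            parts.append("output was mocked")
        return ", ".join(parts)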
{vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/generate_request.py

@@ -17,7 +17,7 @@ class GenerateRequest(pydantic.BaseModel):
         description="Key/value pairs for each template variable defined in the deployment's prompt."
     )
     chat_history: typing.Optional[typing.List[ChatMessageRequest]] = pydantic.Field(
-        description="Optionally provide a list of chat messages that'll be used in place of the special
+        description="Optionally provide a list of chat messages that'll be used in place of the special chat_history variable, if included in the prompt."
     )
     external_ids: typing.Optional[typing.List[str]] = pydantic.Field(
         description="Optionally include a unique identifier for each generation, as represented outside of Vellum. Note that this should generally be a list of length one."
vellum_ai-0.3.12/src/vellum/types/image_variable_value.py (added)

@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .vellum_image import VellumImage
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ImageVariableValue(pydantic.BaseModel):
+    """
+    A base Vellum primitive value representing an image.
+    """
+
+    value: typing.Optional[VellumImage]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
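Like the other generated models, the new `ImageVariableValue` overrides `json()`/`dict()` to default to `by_alias=True` and `exclude_unset=True`. A small sketch of constructing and serializing one; the `src` field on `VellumImage` is an assumption here and the model may carry further optional fields:

    from vellum.types.image_variable_value import ImageVariableValue
    from vellum.types.vellum_image import VellumImage

    image_value = ImageVariableValue(value=VellumImage(src="https://example.com/diagram.png"))
    # Unset fields are excluded, so the payload is roughly {"value": {"src": "https://example.com/diagram.png"}}
    print(image_value.json())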
{vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/initiated_workflow_node_result_event.py

@@ -23,6 +23,7 @@ class InitiatedWorkflowNodeResultEvent(pydantic.BaseModel):
     node_result_id: str
     ts: typing.Optional[dt.datetime]
     data: typing.Optional[WorkflowNodeResultData]
+    source_execution_id: typing.Optional[str]
     input_values: typing.Optional[typing.List[NodeInputVariableCompiledValue]]
 
     def json(self, **kwargs: typing.Any) -> str:
{vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/rejected_workflow_node_result_event.py

@@ -23,6 +23,7 @@ class RejectedWorkflowNodeResultEvent(pydantic.BaseModel):
     node_result_id: str
     ts: typing.Optional[dt.datetime]
     data: typing.Optional[WorkflowNodeResultData]
+    source_execution_id: typing.Optional[str]
     error: WorkflowEventError
 
     def json(self, **kwargs: typing.Any) -> str:
{vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/streaming_workflow_node_result_event.py

@@ -23,6 +23,7 @@ class StreamingWorkflowNodeResultEvent(pydantic.BaseModel):
     node_result_id: str
     ts: typing.Optional[dt.datetime]
     data: typing.Optional[WorkflowNodeResultData]
+    source_execution_id: typing.Optional[str]
     output: typing.Optional[NodeOutputCompiledValue]
     output_index: typing.Optional[int]
 
{vellum_ai-0.3.11 → vellum_ai-0.3.12}/src/vellum/types/workflow_execution_event_error_code.py

@@ -9,6 +9,7 @@ T_Result = typing.TypeVar("T_Result")
 class WorkflowExecutionEventErrorCode(str, enum.Enum):
     """
     - `WORKFLOW_INITIALIZATION` - WORKFLOW_INITIALIZATION
+    - `WORKFLOW_CANCELLED` - WORKFLOW_CANCELLED
     - `NODE_EXECUTION_COUNT_LIMIT_REACHED` - NODE_EXECUTION_COUNT_LIMIT_REACHED
     - `INTERNAL_SERVER_ERROR` - INTERNAL_SERVER_ERROR
     - `NODE_EXECUTION` - NODE_EXECUTION
@@ -18,6 +19,7 @@ class WorkflowExecutionEventErrorCode(str, enum.Enum):
     """
 
     WORKFLOW_INITIALIZATION = "WORKFLOW_INITIALIZATION"
+    WORKFLOW_CANCELLED = "WORKFLOW_CANCELLED"
     NODE_EXECUTION_COUNT_LIMIT_REACHED = "NODE_EXECUTION_COUNT_LIMIT_REACHED"
     INTERNAL_SERVER_ERROR = "INTERNAL_SERVER_ERROR"
     NODE_EXECUTION = "NODE_EXECUTION"
@@ -28,6 +30,7 @@ class WorkflowExecutionEventErrorCode(str, enum.Enum):
     def visit(
         self,
         workflow_initialization: typing.Callable[[], T_Result],
+        workflow_cancelled: typing.Callable[[], T_Result],
         node_execution_count_limit_reached: typing.Callable[[], T_Result],
         internal_server_error: typing.Callable[[], T_Result],
         node_execution: typing.Callable[[], T_Result],
@@ -37,6 +40,8 @@ class WorkflowExecutionEventErrorCode(str, enum.Enum):
     ) -> T_Result:
         if self is WorkflowExecutionEventErrorCode.WORKFLOW_INITIALIZATION:
             return workflow_initialization()
+        if self is WorkflowExecutionEventErrorCode.WORKFLOW_CANCELLED:
+            return workflow_cancelled()
         if self is WorkflowExecutionEventErrorCode.NODE_EXECUTION_COUNT_LIMIT_REACHED:
             return node_execution_count_limit_reached()
         if self is WorkflowExecutionEventErrorCode.INTERNAL_SERVER_ERROR:
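`WORKFLOW_CANCELLED` is a new enum member, and `visit()` gains a corresponding required `workflow_cancelled` callback, so existing callers of `visit()` need updating. A minimal sketch that checks for the new code directly rather than spelling out every `visit()` callback, since the full callback list extends beyond this diff:

    from vellum.types.workflow_execution_event_error_code import WorkflowExecutionEventErrorCode

    def is_cancellation(code: WorkflowExecutionEventErrorCode) -> bool:
        # Enum members are singletons, so identity comparison is the idiomatic check.
        return code is WorkflowExecutionEventErrorCode.WORKFLOW_CANCELLED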