vellum-ai 0.1.4__tar.gz → 0.1.5__tar.gz
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/PKG-INFO +1 -1
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/pyproject.toml +1 -1
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/__init__.py +30 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/client.py +116 -5
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/core/client_wrapper.py +1 -1
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/errors/forbidden_error.py +3 -2
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/__init__.py +34 -0
- vellum_ai-0.1.5/src/vellum/types/chat_history_input_request.py +30 -0
- vellum_ai-0.1.5/src/vellum/types/error_execute_prompt_response.py +30 -0
- vellum_ai-0.1.5/src/vellum/types/execute_prompt_api_error_response.py +28 -0
- vellum_ai-0.1.5/src/vellum/types/execute_prompt_response.py +43 -0
- vellum_ai-0.1.5/src/vellum/types/json_execute_prompt_response.py +29 -0
- vellum_ai-0.1.5/src/vellum/types/json_input_request.py +29 -0
- vellum_ai-0.1.5/src/vellum/types/prompt_deployment_input_request.py +43 -0
- vellum_ai-0.1.5/src/vellum/types/string_execute_prompt_response.py +29 -0
- vellum_ai-0.1.5/src/vellum/types/string_input_request.py +29 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/README.md +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/core/__init__.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/core/api_error.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/core/datetime_utils.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/core/jsonable_encoder.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/core/remove_none_from_dict.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/environment.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/errors/__init__.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/errors/bad_request_error.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/errors/conflict_error.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/errors/internal_server_error.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/errors/not_found_error.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/py.typed +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/resources/__init__.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/resources/deployments/__init__.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/resources/deployments/client.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/resources/document_indexes/__init__.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/resources/document_indexes/client.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/resources/documents/__init__.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/resources/documents/client.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/resources/model_versions/__init__.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/resources/model_versions/client.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/resources/registered_prompts/__init__.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/resources/registered_prompts/client.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/resources/sandboxes/__init__.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/resources/sandboxes/client.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/resources/test_suites/__init__.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/resources/test_suites/client.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/api_node_result.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/api_node_result_data.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/block_type_enum.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/chat_message.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/chat_message_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/chat_message_role.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/conditional_node_result.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/conditional_node_result_data.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/deployment_read.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/deployment_status.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/document_document_to_document_index.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/document_index_read.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/document_index_status.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/document_read.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/document_status.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/enriched_normalized_completion.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/environment_enum.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/evaluation_params.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/evaluation_params_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/execute_workflow_stream_error_response.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/finish_reason_enum.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/generate_error_response.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/generate_options_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/generate_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/generate_response.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/generate_result.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/generate_result_data.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/generate_result_error.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/generate_stream_response.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/generate_stream_result.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/generate_stream_result_data.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/indexing_state_enum.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/logical_operator.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/logprobs_enum.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/metadata_filter_config_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/metadata_filter_rule_combinator.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/metadata_filter_rule_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/model_version_build_config.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/model_version_exec_config.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/model_version_exec_config_parameters.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/model_version_read.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/model_version_read_status_enum.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/model_version_sandbox_snapshot.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/node_input_compiled_chat_history_value.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/node_input_compiled_error_value.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/node_input_compiled_json_value.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/node_input_compiled_number_value.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/node_input_compiled_search_results_value.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/node_input_compiled_string_value.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/node_input_variable_compiled_value.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/normalized_log_probs.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/normalized_token_log_probs.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/paginated_slim_document_list.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/processing_failure_reason_enum.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/processing_state_enum.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/prompt_node_result.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/prompt_node_result_data.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/prompt_template_block.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/prompt_template_block_data.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/prompt_template_block_data_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/prompt_template_block_properties.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/prompt_template_block_properties_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/prompt_template_block_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/provider_enum.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/register_prompt_error_response.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/register_prompt_model_parameters_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/register_prompt_prompt.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/register_prompt_prompt_info_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/register_prompt_response.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/registered_prompt_deployment.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/registered_prompt_input_variable_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/registered_prompt_model_version.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/registered_prompt_sandbox.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/registered_prompt_sandbox_snapshot.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/sandbox_metric_input_params.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/sandbox_metric_input_params_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/sandbox_scenario.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/scenario_input.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/scenario_input_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/scenario_input_type_enum.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/search_error_response.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/search_filters_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/search_node_result.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/search_node_result_data.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/search_request_options_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/search_response.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/search_result.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/search_result_document.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/search_result_document_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/search_result_merging_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/search_result_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/search_weights_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/slim_document.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/submit_completion_actual_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/submit_completion_actuals_error_response.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/submit_workflow_execution_actual_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/templating_node_chat_history_result.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/templating_node_error_result.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/templating_node_json_result.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/templating_node_number_result.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/templating_node_result.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/templating_node_result_data.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/templating_node_result_output.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/templating_node_search_results_result.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/templating_node_string_result.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/terminal_node_chat_history_result.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/terminal_node_error_result.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/terminal_node_json_result.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/terminal_node_number_result.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/terminal_node_result.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/terminal_node_result_data.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/terminal_node_result_output.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/terminal_node_search_results_result.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/terminal_node_string_result.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/test_case_chat_history_variable_value.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/test_case_chat_history_variable_value_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/test_case_error_variable_value.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/test_case_error_variable_value_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/test_case_json_variable_value.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/test_case_json_variable_value_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/test_case_number_variable_value.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/test_case_number_variable_value_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/test_case_search_results_variable_value.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/test_case_search_results_variable_value_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/test_case_string_variable_value.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/test_case_string_variable_value_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/test_case_variable_value.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/test_case_variable_value_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/test_suite_test_case.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/upload_document_error_response.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/upload_document_response.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/vellum_error.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/vellum_error_code_enum.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/vellum_error_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/vellum_variable.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/vellum_variable_type.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_event_error.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_execution_actual_chat_history_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_execution_actual_json_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_execution_actual_string_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_execution_event_error_code.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_execution_event_type.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_execution_node_result_event.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_execution_workflow_result_event.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_node_result_data.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_node_result_event.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_node_result_event_state.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_request_chat_history_input_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_request_input_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_request_json_input_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_request_string_input_request.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_result_event.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_result_event_output_data.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_result_event_output_data_chat_history.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_result_event_output_data_error.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_result_event_output_data_json.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_result_event_output_data_number.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_result_event_output_data_search_results.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_result_event_output_data_string.py +0 -0
- {vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/workflow_stream_event.py +0 -0
{vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/__init__.py
@@ -4,6 +4,7 @@ from .types import (
     ApiNodeResult,
     ApiNodeResultData,
     BlockTypeEnum,
+    ChatHistoryInputRequest,
     ChatMessage,
     ChatMessageRequest,
     ChatMessageRole,
@@ -18,8 +19,14 @@ from .types import (
     DocumentStatus,
     EnrichedNormalizedCompletion,
     EnvironmentEnum,
+    ErrorExecutePromptResponse,
     EvaluationParams,
     EvaluationParamsRequest,
+    ExecutePromptApiErrorResponse,
+    ExecutePromptResponse,
+    ExecutePromptResponse_Error,
+    ExecutePromptResponse_Json,
+    ExecutePromptResponse_String,
     ExecuteWorkflowStreamErrorResponse,
     FinishReasonEnum,
     GenerateErrorResponse,
@@ -33,6 +40,8 @@ from .types import (
     GenerateStreamResult,
     GenerateStreamResultData,
     IndexingStateEnum,
+    JsonExecutePromptResponse,
+    JsonInputRequest,
     LogicalOperator,
     LogprobsEnum,
     MetadataFilterConfigRequest,
@@ -62,6 +71,10 @@ from .types import (
     PaginatedSlimDocumentList,
     ProcessingFailureReasonEnum,
     ProcessingStateEnum,
+    PromptDeploymentInputRequest,
+    PromptDeploymentInputRequest_ChatHistory,
+    PromptDeploymentInputRequest_Json,
+    PromptDeploymentInputRequest_String,
     PromptNodeResult,
     PromptNodeResultData,
     PromptTemplateBlock,
@@ -100,6 +113,8 @@ from .types import (
     SearchResultRequest,
     SearchWeightsRequest,
     SlimDocument,
+    StringExecutePromptResponse,
+    StringInputRequest,
     SubmitCompletionActualRequest,
     SubmitCompletionActualsErrorResponse,
     SubmitWorkflowExecutionActualRequest,
@@ -229,6 +244,7 @@ __all__ = [
     "ApiNodeResultData",
     "BadRequestError",
     "BlockTypeEnum",
+    "ChatHistoryInputRequest",
     "ChatMessage",
     "ChatMessageRequest",
     "ChatMessageRole",
@@ -244,8 +260,14 @@ __all__ = [
     "DocumentStatus",
     "EnrichedNormalizedCompletion",
     "EnvironmentEnum",
+    "ErrorExecutePromptResponse",
     "EvaluationParams",
     "EvaluationParamsRequest",
+    "ExecutePromptApiErrorResponse",
+    "ExecutePromptResponse",
+    "ExecutePromptResponse_Error",
+    "ExecutePromptResponse_Json",
+    "ExecutePromptResponse_String",
     "ExecuteWorkflowStreamErrorResponse",
     "FinishReasonEnum",
     "ForbiddenError",
@@ -261,6 +283,8 @@ __all__ = [
     "GenerateStreamResultData",
     "IndexingStateEnum",
     "InternalServerError",
+    "JsonExecutePromptResponse",
+    "JsonInputRequest",
     "LogicalOperator",
     "LogprobsEnum",
     "MetadataFilterConfigRequest",
@@ -291,6 +315,10 @@ __all__ = [
     "PaginatedSlimDocumentList",
     "ProcessingFailureReasonEnum",
     "ProcessingStateEnum",
+    "PromptDeploymentInputRequest",
+    "PromptDeploymentInputRequest_ChatHistory",
+    "PromptDeploymentInputRequest_Json",
+    "PromptDeploymentInputRequest_String",
     "PromptNodeResult",
     "PromptNodeResultData",
     "PromptTemplateBlock",
@@ -329,6 +357,8 @@ __all__ = [
     "SearchResultRequest",
     "SearchWeightsRequest",
     "SlimDocument",
+    "StringExecutePromptResponse",
+    "StringInputRequest",
     "SubmitCompletionActualRequest",
     "SubmitCompletionActualsErrorResponse",
     "SubmitWorkflowExecutionActualRequest",
{vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/client.py
@@ -22,11 +22,12 @@ from .resources.model_versions.client import AsyncModelVersionsClient, ModelVersionsClient
 from .resources.registered_prompts.client import AsyncRegisteredPromptsClient, RegisteredPromptsClient
 from .resources.sandboxes.client import AsyncSandboxesClient, SandboxesClient
 from .resources.test_suites.client import AsyncTestSuitesClient, TestSuitesClient
-from .types.
+from .types.execute_prompt_response import ExecutePromptResponse
 from .types.generate_options_request import GenerateOptionsRequest
 from .types.generate_request import GenerateRequest
 from .types.generate_response import GenerateResponse
 from .types.generate_stream_response import GenerateStreamResponse
+from .types.prompt_deployment_input_request import PromptDeploymentInputRequest
 from .types.search_request_options_request import SearchRequestOptionsRequest
 from .types.search_response import SearchResponse
 from .types.submit_completion_actual_request import SubmitCompletionActualRequest
@@ -66,6 +67,61 @@ class Vellum:
         self.sandboxes = SandboxesClient(client_wrapper=self._client_wrapper)
         self.test_suites = TestSuitesClient(client_wrapper=self._client_wrapper)

+    def execute_prompt(
+        self,
+        *,
+        inputs: typing.List[PromptDeploymentInputRequest],
+        prompt_deployment_id: typing.Optional[str] = OMIT,
+        prompt_deployment_name: typing.Optional[str] = OMIT,
+        release_tag: typing.Optional[str] = OMIT,
+        external_id: typing.Optional[str] = OMIT,
+    ) -> ExecutePromptResponse:
+        """
+        Executes a deployed Prompt and returns the result.
+
+        Parameters:
+            - inputs: typing.List[PromptDeploymentInputRequest].
+
+            - prompt_deployment_id: typing.Optional[str]. The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.
+
+            - prompt_deployment_name: typing.Optional[str]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+
+            - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Prompt Deployment
+
+            - external_id: typing.Optional[str].
+        """
+        _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
+        if prompt_deployment_id is not OMIT:
+            _request["prompt_deployment_id"] = prompt_deployment_id
+        if prompt_deployment_name is not OMIT:
+            _request["prompt_deployment_name"] = prompt_deployment_name
+        if release_tag is not OMIT:
+            _request["release_tag"] = release_tag
+        if external_id is not OMIT:
+            _request["external_id"] = external_id
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", "v1/execute-prompt"),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=None,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ExecutePromptResponse, _response.json())  # type: ignore
+        if _response.status_code == 400:
+            raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+        if _response.status_code == 403:
+            raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+        if _response.status_code == 404:
+            raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+        if _response.status_code == 500:
+            raise InternalServerError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def execute_workflow_stream(
         self,
         *,
@@ -187,7 +243,7 @@ class Vellum:
         if _response.status_code == 400:
             raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 403:
-            raise ForbiddenError(pydantic.parse_obj_as(GenerateErrorResponse, _response.json()))  # type: ignore
+            raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 404:
             raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 500:
@@ -244,7 +300,7 @@ class Vellum:
         if _response.status_code == 400:
             raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 403:
-            raise ForbiddenError(pydantic.parse_obj_as(GenerateErrorResponse, _response.json()))  # type: ignore
+            raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 404:
             raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 500:
@@ -432,6 +488,61 @@ class AsyncVellum:
         self.sandboxes = AsyncSandboxesClient(client_wrapper=self._client_wrapper)
         self.test_suites = AsyncTestSuitesClient(client_wrapper=self._client_wrapper)

+    async def execute_prompt(
+        self,
+        *,
+        inputs: typing.List[PromptDeploymentInputRequest],
+        prompt_deployment_id: typing.Optional[str] = OMIT,
+        prompt_deployment_name: typing.Optional[str] = OMIT,
+        release_tag: typing.Optional[str] = OMIT,
+        external_id: typing.Optional[str] = OMIT,
+    ) -> ExecutePromptResponse:
+        """
+        Executes a deployed Prompt and returns the result.
+
+        Parameters:
+            - inputs: typing.List[PromptDeploymentInputRequest].
+
+            - prompt_deployment_id: typing.Optional[str]. The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.
+
+            - prompt_deployment_name: typing.Optional[str]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+
+            - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Prompt Deployment
+
+            - external_id: typing.Optional[str].
+        """
+        _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
+        if prompt_deployment_id is not OMIT:
+            _request["prompt_deployment_id"] = prompt_deployment_id
+        if prompt_deployment_name is not OMIT:
+            _request["prompt_deployment_name"] = prompt_deployment_name
+        if release_tag is not OMIT:
+            _request["release_tag"] = release_tag
+        if external_id is not OMIT:
+            _request["external_id"] = external_id
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", "v1/execute-prompt"),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=None,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ExecutePromptResponse, _response.json())  # type: ignore
+        if _response.status_code == 400:
+            raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+        if _response.status_code == 403:
+            raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+        if _response.status_code == 404:
+            raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+        if _response.status_code == 500:
+            raise InternalServerError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def execute_workflow_stream(
         self,
         *,
@@ -553,7 +664,7 @@ class AsyncVellum:
         if _response.status_code == 400:
             raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 403:
-            raise ForbiddenError(pydantic.parse_obj_as(GenerateErrorResponse, _response.json()))  # type: ignore
+            raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 404:
             raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 500:
@@ -610,7 +721,7 @@ class AsyncVellum:
         if _response.status_code == 400:
             raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 403:
-            raise ForbiddenError(pydantic.parse_obj_as(GenerateErrorResponse, _response.json()))  # type: ignore
+            raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 404:
             raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         if _response.status_code == 500:
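For orientation, a minimal usage sketch of the new execute_prompt method (not part of the diff). It assumes the Vellum client is constructed with an api_key, and that StringInputRequest mirrors JsonInputRequest with `name` and `value` fields (its file is not shown above); the deployment and variable names are hypothetical.

    # Minimal sketch, under the assumptions stated above.
    from vellum import PromptDeploymentInputRequest_String
    from vellum.client import Vellum

    client = Vellum(api_key="YOUR_API_KEY")  # hypothetical key
    response = client.execute_prompt(
        inputs=[
            PromptDeploymentInputRequest_String(
                type="STRING",
                name="question",  # the variable's name, as defined in the deployment
                value="What does the execute-prompt endpoint return?",
            )
        ],
        prompt_deployment_name="my-deployment",  # or prompt_deployment_id=...
    )
    print(response)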
{vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/core/client_wrapper.py
@@ -16,7 +16,7 @@ class BaseClientWrapper:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "vellum-ai",
-            "X-Fern-SDK-Version": "v0.1.4",
+            "X-Fern-SDK-Version": "v0.1.5",
         }
         headers["X_API_KEY"] = self.api_key
         return headers
{vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/errors/forbidden_error.py
@@ -1,9 +1,10 @@
 # This file was auto-generated by Fern from our API Definition.

+import typing
+
 from ..core.api_error import ApiError
-from ..types.generate_error_response import GenerateErrorResponse


 class ForbiddenError(ApiError):
-    def __init__(self, body: GenerateErrorResponse):
+    def __init__(self, body: typing.Any):
         super().__init__(status_code=403, body=body)
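The practical effect, sketched under the same assumptions as the client sketch above: ForbiddenError.body is now the raw parsed JSON (typing.Any) rather than a GenerateErrorResponse, so callers inspect it as untyped data.

    # Sketch: catching the loosened ForbiddenError; the body's shape is assumed.
    from vellum import ForbiddenError, PromptDeploymentInputRequest_String
    from vellum.client import Vellum

    client = Vellum(api_key="YOUR_API_KEY")  # hypothetical key
    try:
        client.execute_prompt(
            inputs=[PromptDeploymentInputRequest_String(type="STRING", name="question", value="hi")],
            prompt_deployment_name="my-deployment",
        )
    except ForbiddenError as e:
        print(e.status_code, e.body)  # body is untyped JSON, e.g. {"detail": "..."}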
{vellum_ai-0.1.4 → vellum_ai-0.1.5}/src/vellum/types/__init__.py
@@ -3,6 +3,7 @@
 from .api_node_result import ApiNodeResult
 from .api_node_result_data import ApiNodeResultData
 from .block_type_enum import BlockTypeEnum
+from .chat_history_input_request import ChatHistoryInputRequest
 from .chat_message import ChatMessage
 from .chat_message_request import ChatMessageRequest
 from .chat_message_role import ChatMessageRole
@@ -17,8 +18,16 @@ from .document_read import DocumentRead
 from .document_status import DocumentStatus
 from .enriched_normalized_completion import EnrichedNormalizedCompletion
 from .environment_enum import EnvironmentEnum
+from .error_execute_prompt_response import ErrorExecutePromptResponse
 from .evaluation_params import EvaluationParams
 from .evaluation_params_request import EvaluationParamsRequest
+from .execute_prompt_api_error_response import ExecutePromptApiErrorResponse
+from .execute_prompt_response import (
+    ExecutePromptResponse,
+    ExecutePromptResponse_Error,
+    ExecutePromptResponse_Json,
+    ExecutePromptResponse_String,
+)
 from .execute_workflow_stream_error_response import ExecuteWorkflowStreamErrorResponse
 from .finish_reason_enum import FinishReasonEnum
 from .generate_error_response import GenerateErrorResponse
@@ -32,6 +41,8 @@ from .generate_stream_response import GenerateStreamResponse
 from .generate_stream_result import GenerateStreamResult
 from .generate_stream_result_data import GenerateStreamResultData
 from .indexing_state_enum import IndexingStateEnum
+from .json_execute_prompt_response import JsonExecutePromptResponse
+from .json_input_request import JsonInputRequest
 from .logical_operator import LogicalOperator
 from .logprobs_enum import LogprobsEnum
 from .metadata_filter_config_request import MetadataFilterConfigRequest
@@ -63,6 +74,12 @@ from .normalized_token_log_probs import NormalizedTokenLogProbs
 from .paginated_slim_document_list import PaginatedSlimDocumentList
 from .processing_failure_reason_enum import ProcessingFailureReasonEnum
 from .processing_state_enum import ProcessingStateEnum
+from .prompt_deployment_input_request import (
+    PromptDeploymentInputRequest,
+    PromptDeploymentInputRequest_ChatHistory,
+    PromptDeploymentInputRequest_Json,
+    PromptDeploymentInputRequest_String,
+)
 from .prompt_node_result import PromptNodeResult
 from .prompt_node_result_data import PromptNodeResultData
 from .prompt_template_block import PromptTemplateBlock
@@ -101,6 +118,8 @@ from .search_result_merging_request import SearchResultMergingRequest
 from .search_result_request import SearchResultRequest
 from .search_weights_request import SearchWeightsRequest
 from .slim_document import SlimDocument
+from .string_execute_prompt_response import StringExecutePromptResponse
+from .string_input_request import StringInputRequest
 from .submit_completion_actual_request import SubmitCompletionActualRequest
 from .submit_completion_actuals_error_response import SubmitCompletionActualsErrorResponse
 from .submit_workflow_execution_actual_request import (
@@ -231,6 +250,7 @@ __all__ = [
     "ApiNodeResult",
     "ApiNodeResultData",
     "BlockTypeEnum",
+    "ChatHistoryInputRequest",
     "ChatMessage",
     "ChatMessageRequest",
     "ChatMessageRole",
@@ -245,8 +265,14 @@ __all__ = [
     "DocumentStatus",
     "EnrichedNormalizedCompletion",
     "EnvironmentEnum",
+    "ErrorExecutePromptResponse",
     "EvaluationParams",
     "EvaluationParamsRequest",
+    "ExecutePromptApiErrorResponse",
+    "ExecutePromptResponse",
+    "ExecutePromptResponse_Error",
+    "ExecutePromptResponse_Json",
+    "ExecutePromptResponse_String",
     "ExecuteWorkflowStreamErrorResponse",
     "FinishReasonEnum",
     "GenerateErrorResponse",
@@ -260,6 +286,8 @@ __all__ = [
     "GenerateStreamResult",
     "GenerateStreamResultData",
     "IndexingStateEnum",
+    "JsonExecutePromptResponse",
+    "JsonInputRequest",
     "LogicalOperator",
     "LogprobsEnum",
     "MetadataFilterConfigRequest",
@@ -289,6 +317,10 @@ __all__ = [
     "PaginatedSlimDocumentList",
     "ProcessingFailureReasonEnum",
     "ProcessingStateEnum",
+    "PromptDeploymentInputRequest",
+    "PromptDeploymentInputRequest_ChatHistory",
+    "PromptDeploymentInputRequest_Json",
+    "PromptDeploymentInputRequest_String",
     "PromptNodeResult",
     "PromptNodeResultData",
     "PromptTemplateBlock",
@@ -327,6 +359,8 @@ __all__ = [
     "SearchResultRequest",
     "SearchWeightsRequest",
     "SlimDocument",
+    "StringExecutePromptResponse",
+    "StringInputRequest",
     "SubmitCompletionActualRequest",
     "SubmitCompletionActualsErrorResponse",
     "SubmitWorkflowExecutionActualRequest",
vellum_ai-0.1.5/src/vellum/types/chat_history_input_request.py (new file)
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .chat_message_request import ChatMessageRequest
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ChatHistoryInputRequest(pydantic.BaseModel):
+    name: str = pydantic.Field(description="The variable's name, as defined in the deployment.")
+    value: typing.List[ChatMessageRequest]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
vellum_ai-0.1.5/src/vellum/types/error_execute_prompt_response.py (new file)
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .vellum_error import VellumError
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ErrorExecutePromptResponse(pydantic.BaseModel):
+    value: VellumError
+    execution_id: str
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
vellum_ai-0.1.5/src/vellum/types/execute_prompt_api_error_response.py (new file)
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ExecutePromptApiErrorResponse(pydantic.BaseModel):
+    detail: str = pydantic.Field(description="Details about why the request failed.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
vellum_ai-0.1.5/src/vellum/types/execute_prompt_response.py (new file)
@@ -0,0 +1,43 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import typing_extensions
+
+from .error_execute_prompt_response import ErrorExecutePromptResponse
+from .json_execute_prompt_response import JsonExecutePromptResponse
+from .string_execute_prompt_response import StringExecutePromptResponse
+
+
+class ExecutePromptResponse_Error(ErrorExecutePromptResponse):
+    type: typing_extensions.Literal["ERROR"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class ExecutePromptResponse_Json(JsonExecutePromptResponse):
+    type: typing_extensions.Literal["JSON"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class ExecutePromptResponse_String(StringExecutePromptResponse):
+    type: typing_extensions.Literal["STRING"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+ExecutePromptResponse = typing.Union[
+    ExecutePromptResponse_Error, ExecutePromptResponse_Json, ExecutePromptResponse_String
+]
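Because ExecutePromptResponse is a smart_union discriminated by the literal `type` field, callers can narrow the result with plain comparisons. A sketch: the JSON and ERROR value shapes come from the new type files in this diff, while the STRING variant's value is assumed to be a str (string_execute_prompt_response.py is not shown above).

    # Sketch: narrowing the ExecutePromptResponse union on its `type` discriminant.
    from vellum import ExecutePromptResponse

    def handle(response: ExecutePromptResponse) -> None:
        if response.type == "STRING":
            print("completion:", response.value)  # str value assumed for this variant
        elif response.type == "JSON":
            print("data:", response.value)  # typing.Dict[str, typing.Any], per JsonExecutePromptResponse
        elif response.type == "ERROR":
            print("failed:", response.value)  # a VellumError, per ErrorExecutePromptResponse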
vellum_ai-0.1.5/src/vellum/types/json_execute_prompt_response.py (new file)
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class JsonExecutePromptResponse(pydantic.BaseModel):
+    value: typing.Dict[str, typing.Any]
+    execution_id: str
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
vellum_ai-0.1.5/src/vellum/types/json_input_request.py (new file)
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class JsonInputRequest(pydantic.BaseModel):
+    name: str = pydantic.Field(description="The variable's name, as defined in the deployment.")
+    value: typing.Dict[str, typing.Any]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
vellum_ai-0.1.5/src/vellum/types/prompt_deployment_input_request.py (new file)
@@ -0,0 +1,43 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import typing_extensions
+
+from .chat_history_input_request import ChatHistoryInputRequest
+from .json_input_request import JsonInputRequest
+from .string_input_request import StringInputRequest
+
+
+class PromptDeploymentInputRequest_String(StringInputRequest):
+    type: typing_extensions.Literal["STRING"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class PromptDeploymentInputRequest_Json(JsonInputRequest):
+    type: typing_extensions.Literal["JSON"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class PromptDeploymentInputRequest_ChatHistory(ChatHistoryInputRequest):
+    type: typing_extensions.Literal["CHAT_HISTORY"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+PromptDeploymentInputRequest = typing.Union[
+    PromptDeploymentInputRequest_String, PromptDeploymentInputRequest_Json, PromptDeploymentInputRequest_ChatHistory
+]
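The request-side union mirrors the response union. A sketch of building the CHAT_HISTORY variant: the `name` and `value` fields come from chat_history_input_request.py above, while ChatMessageRequest's constructor fields (`role`, `text`) are assumptions, since that file is unchanged and not shown in this diff.

    # Sketch: a CHAT_HISTORY input for execute_prompt; role/text fields assumed.
    from vellum import ChatMessageRequest, PromptDeploymentInputRequest_ChatHistory

    chat_history_input = PromptDeploymentInputRequest_ChatHistory(
        type="CHAT_HISTORY",
        name="chat_history",  # the variable's name, as defined in the deployment
        value=[ChatMessageRequest(role="USER", text="Hello!")],
    )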