vellum_ai 0.3.4
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/lib/core/file_utilities.rb +26 -0
- data/lib/environment.rb +11 -0
- data/lib/gemconfig.rb +14 -0
- data/lib/requests.rb +88 -0
- data/lib/types_export.rb +255 -0
- data/lib/vellum_ai/deployments/client.rb +157 -0
- data/lib/vellum_ai/deployments/types/deployments_list_request_status.rb +8 -0
- data/lib/vellum_ai/document_indexes/client.rb +135 -0
- data/lib/vellum_ai/documents/client.rb +245 -0
- data/lib/vellum_ai/model_versions/client.rb +61 -0
- data/lib/vellum_ai/registered_prompts/client.rb +154 -0
- data/lib/vellum_ai/sandboxes/client.rb +132 -0
- data/lib/vellum_ai/test_suites/client.rb +130 -0
- data/lib/vellum_ai/types/api_node_result.rb +52 -0
- data/lib/vellum_ai/types/api_node_result_data.rb +80 -0
- data/lib/vellum_ai/types/array_chat_message_content.rb +50 -0
- data/lib/vellum_ai/types/array_chat_message_content_item.rb +103 -0
- data/lib/vellum_ai/types/array_chat_message_content_item_request.rb +103 -0
- data/lib/vellum_ai/types/array_chat_message_content_request.rb +50 -0
- data/lib/vellum_ai/types/block_type_enum.rb +11 -0
- data/lib/vellum_ai/types/chat_history_enum.rb +5 -0
- data/lib/vellum_ai/types/chat_history_input_request.rb +55 -0
- data/lib/vellum_ai/types/chat_message.rb +62 -0
- data/lib/vellum_ai/types/chat_message_content.rb +116 -0
- data/lib/vellum_ai/types/chat_message_content_request.rb +116 -0
- data/lib/vellum_ai/types/chat_message_request.rb +62 -0
- data/lib/vellum_ai/types/chat_message_role.rb +6 -0
- data/lib/vellum_ai/types/code_execution_node_chat_history_result.rb +54 -0
- data/lib/vellum_ai/types/code_execution_node_error_result.rb +56 -0
- data/lib/vellum_ai/types/code_execution_node_json_result.rb +50 -0
- data/lib/vellum_ai/types/code_execution_node_number_result.rb +50 -0
- data/lib/vellum_ai/types/code_execution_node_result.rb +52 -0
- data/lib/vellum_ai/types/code_execution_node_result_data.rb +51 -0
- data/lib/vellum_ai/types/code_execution_node_result_output.rb +142 -0
- data/lib/vellum_ai/types/code_execution_node_search_results_result.rb +54 -0
- data/lib/vellum_ai/types/code_execution_node_string_result.rb +50 -0
- data/lib/vellum_ai/types/conditional_node_result.rb +52 -0
- data/lib/vellum_ai/types/conditional_node_result_data.rb +45 -0
- data/lib/vellum_ai/types/deployment_provider_payload_response.rb +45 -0
- data/lib/vellum_ai/types/deployment_read.rb +115 -0
- data/lib/vellum_ai/types/document_document_to_document_index.rb +70 -0
- data/lib/vellum_ai/types/document_index_read.rb +98 -0
- data/lib/vellum_ai/types/document_read.rb +121 -0
- data/lib/vellum_ai/types/document_status.rb +5 -0
- data/lib/vellum_ai/types/enriched_normalized_completion.rb +118 -0
- data/lib/vellum_ai/types/entity_status.rb +6 -0
- data/lib/vellum_ai/types/environment_enum.rb +6 -0
- data/lib/vellum_ai/types/error_enum.rb +5 -0
- data/lib/vellum_ai/types/error_variable_value.rb +51 -0
- data/lib/vellum_ai/types/execute_prompt_api_error_response.rb +45 -0
- data/lib/vellum_ai/types/execute_prompt_event.rb +116 -0
- data/lib/vellum_ai/types/execute_prompt_response.rb +90 -0
- data/lib/vellum_ai/types/execute_workflow_error_response.rb +45 -0
- data/lib/vellum_ai/types/execute_workflow_response.rb +67 -0
- data/lib/vellum_ai/types/execute_workflow_stream_error_response.rb +45 -0
- data/lib/vellum_ai/types/execute_workflow_workflow_result_event.rb +90 -0
- data/lib/vellum_ai/types/finish_reason_enum.rb +6 -0
- data/lib/vellum_ai/types/fulfilled_enum.rb +5 -0
- data/lib/vellum_ai/types/fulfilled_execute_prompt_event.rb +66 -0
- data/lib/vellum_ai/types/fulfilled_execute_prompt_response.rb +71 -0
- data/lib/vellum_ai/types/fulfilled_execute_workflow_workflow_result_event.rb +61 -0
- data/lib/vellum_ai/types/fulfilled_function_call.rb +56 -0
- data/lib/vellum_ai/types/fulfilled_prompt_execution_meta.rb +52 -0
- data/lib/vellum_ai/types/fulfilled_workflow_node_result_event.rb +90 -0
- data/lib/vellum_ai/types/function_call.rb +90 -0
- data/lib/vellum_ai/types/function_call_chat_message_content.rb +52 -0
- data/lib/vellum_ai/types/function_call_chat_message_content_request.rb +52 -0
- data/lib/vellum_ai/types/function_call_chat_message_content_value.rb +56 -0
- data/lib/vellum_ai/types/function_call_chat_message_content_value_request.rb +56 -0
- data/lib/vellum_ai/types/function_call_enum.rb +5 -0
- data/lib/vellum_ai/types/function_call_variable_value.rb +51 -0
- data/lib/vellum_ai/types/generate_error_response.rb +45 -0
- data/lib/vellum_ai/types/generate_options_request.rb +50 -0
- data/lib/vellum_ai/types/generate_request.rb +60 -0
- data/lib/vellum_ai/types/generate_response.rb +49 -0
- data/lib/vellum_ai/types/generate_result.rb +62 -0
- data/lib/vellum_ai/types/generate_result_data.rb +49 -0
- data/lib/vellum_ai/types/generate_result_error.rb +45 -0
- data/lib/vellum_ai/types/generate_stream_response.rb +51 -0
- data/lib/vellum_ai/types/generate_stream_result.rb +67 -0
- data/lib/vellum_ai/types/generate_stream_result_data.rb +56 -0
- data/lib/vellum_ai/types/image_chat_message_content.rb +52 -0
- data/lib/vellum_ai/types/image_chat_message_content_request.rb +52 -0
- data/lib/vellum_ai/types/image_enum.rb +5 -0
- data/lib/vellum_ai/types/indexing_state_enum.rb +12 -0
- data/lib/vellum_ai/types/initiated_enum.rb +5 -0
- data/lib/vellum_ai/types/initiated_execute_prompt_event.rb +57 -0
- data/lib/vellum_ai/types/initiated_prompt_execution_meta.rb +68 -0
- data/lib/vellum_ai/types/initiated_workflow_node_result_event.rb +90 -0
- data/lib/vellum_ai/types/json_enum.rb +5 -0
- data/lib/vellum_ai/types/json_input_request.rb +51 -0
- data/lib/vellum_ai/types/json_variable_value.rb +45 -0
- data/lib/vellum_ai/types/logical_operator.rb +25 -0
- data/lib/vellum_ai/types/logprobs_enum.rb +6 -0
- data/lib/vellum_ai/types/metadata_filter_config_request.rb +85 -0
- data/lib/vellum_ai/types/metadata_filter_rule_combinator.rb +6 -0
- data/lib/vellum_ai/types/metadata_filter_rule_request.rb +84 -0
- data/lib/vellum_ai/types/model_version_build_config.rb +66 -0
- data/lib/vellum_ai/types/model_version_exec_config.rb +90 -0
- data/lib/vellum_ai/types/model_version_exec_config_parameters.rb +98 -0
- data/lib/vellum_ai/types/model_version_read.rb +133 -0
- data/lib/vellum_ai/types/model_version_read_status_enum.rb +11 -0
- data/lib/vellum_ai/types/model_version_sandbox_snapshot.rb +61 -0
- data/lib/vellum_ai/types/named_test_case_chat_history_variable_value_request.rb +54 -0
- data/lib/vellum_ai/types/named_test_case_error_variable_value_request.rb +56 -0
- data/lib/vellum_ai/types/named_test_case_json_variable_value_request.rb +50 -0
- data/lib/vellum_ai/types/named_test_case_number_variable_value_request.rb +50 -0
- data/lib/vellum_ai/types/named_test_case_search_results_variable_value_request.rb +54 -0
- data/lib/vellum_ai/types/named_test_case_string_variable_value_request.rb +50 -0
- data/lib/vellum_ai/types/named_test_case_variable_value_request.rb +142 -0
- data/lib/vellum_ai/types/node_input_compiled_chat_history_value.rb +59 -0
- data/lib/vellum_ai/types/node_input_compiled_error_value.rb +61 -0
- data/lib/vellum_ai/types/node_input_compiled_json_value.rb +55 -0
- data/lib/vellum_ai/types/node_input_compiled_number_value.rb +55 -0
- data/lib/vellum_ai/types/node_input_compiled_search_results_value.rb +59 -0
- data/lib/vellum_ai/types/node_input_compiled_string_value.rb +55 -0
- data/lib/vellum_ai/types/node_input_variable_compiled_value.rb +142 -0
- data/lib/vellum_ai/types/node_output_compiled_chat_history_value.rb +54 -0
- data/lib/vellum_ai/types/node_output_compiled_error_value.rb +56 -0
- data/lib/vellum_ai/types/node_output_compiled_json_value.rb +50 -0
- data/lib/vellum_ai/types/node_output_compiled_number_value.rb +50 -0
- data/lib/vellum_ai/types/node_output_compiled_search_results_value.rb +54 -0
- data/lib/vellum_ai/types/node_output_compiled_string_value.rb +50 -0
- data/lib/vellum_ai/types/node_output_compiled_value.rb +142 -0
- data/lib/vellum_ai/types/normalized_log_probs.rb +54 -0
- data/lib/vellum_ai/types/normalized_token_log_probs.rb +61 -0
- data/lib/vellum_ai/types/number_enum.rb +5 -0
- data/lib/vellum_ai/types/paginated_slim_deployment_read_list.rb +64 -0
- data/lib/vellum_ai/types/paginated_slim_document_list.rb +64 -0
- data/lib/vellum_ai/types/paginated_slim_workflow_deployment_list.rb +64 -0
- data/lib/vellum_ai/types/processing_failure_reason_enum.rb +9 -0
- data/lib/vellum_ai/types/processing_state_enum.rb +11 -0
- data/lib/vellum_ai/types/prompt_deployment_expand_meta_request_request.rb +74 -0
- data/lib/vellum_ai/types/prompt_deployment_input_request.rb +103 -0
- data/lib/vellum_ai/types/prompt_execution_meta.rb +76 -0
- data/lib/vellum_ai/types/prompt_node_result.rb +52 -0
- data/lib/vellum_ai/types/prompt_node_result_data.rb +55 -0
- data/lib/vellum_ai/types/prompt_output.rb +116 -0
- data/lib/vellum_ai/types/prompt_template_block.rb +62 -0
- data/lib/vellum_ai/types/prompt_template_block_data.rb +54 -0
- data/lib/vellum_ai/types/prompt_template_block_data_request.rb +54 -0
- data/lib/vellum_ai/types/prompt_template_block_properties.rb +104 -0
- data/lib/vellum_ai/types/prompt_template_block_properties_request.rb +104 -0
- data/lib/vellum_ai/types/prompt_template_block_request.rb +62 -0
- data/lib/vellum_ai/types/provider_enum.rb +20 -0
- data/lib/vellum_ai/types/raw_prompt_execution_overrides_request.rb +55 -0
- data/lib/vellum_ai/types/register_prompt_error_response.rb +45 -0
- data/lib/vellum_ai/types/register_prompt_model_parameters_request.rb +98 -0
- data/lib/vellum_ai/types/register_prompt_prompt.rb +50 -0
- data/lib/vellum_ai/types/register_prompt_prompt_info_request.rb +60 -0
- data/lib/vellum_ai/types/register_prompt_response.rb +110 -0
- data/lib/vellum_ai/types/registered_prompt_deployment.rb +55 -0
- data/lib/vellum_ai/types/registered_prompt_input_variable_request.rb +56 -0
- data/lib/vellum_ai/types/registered_prompt_model_version.rb +50 -0
- data/lib/vellum_ai/types/registered_prompt_sandbox.rb +50 -0
- data/lib/vellum_ai/types/registered_prompt_sandbox_snapshot.rb +45 -0
- data/lib/vellum_ai/types/rejected_enum.rb +5 -0
- data/lib/vellum_ai/types/rejected_execute_prompt_event.rb +68 -0
- data/lib/vellum_ai/types/rejected_execute_prompt_response.rb +73 -0
- data/lib/vellum_ai/types/rejected_execute_workflow_workflow_result_event.rb +63 -0
- data/lib/vellum_ai/types/rejected_function_call.rb +62 -0
- data/lib/vellum_ai/types/rejected_prompt_execution_meta.rb +52 -0
- data/lib/vellum_ai/types/rejected_workflow_node_result_event.rb +92 -0
- data/lib/vellum_ai/types/sandbox_scenario.rb +59 -0
- data/lib/vellum_ai/types/scenario_input.rb +70 -0
- data/lib/vellum_ai/types/scenario_input_request.rb +70 -0
- data/lib/vellum_ai/types/scenario_input_type_enum.rb +6 -0
- data/lib/vellum_ai/types/search_error_response.rb +45 -0
- data/lib/vellum_ai/types/search_filters_request.rb +56 -0
- data/lib/vellum_ai/types/search_node_result.rb +52 -0
- data/lib/vellum_ai/types/search_node_result_data.rb +70 -0
- data/lib/vellum_ai/types/search_request_options_request.rb +79 -0
- data/lib/vellum_ai/types/search_response.rb +49 -0
- data/lib/vellum_ai/types/search_result.rb +66 -0
- data/lib/vellum_ai/types/search_result_document.rb +60 -0
- data/lib/vellum_ai/types/search_result_document_request.rb +55 -0
- data/lib/vellum_ai/types/search_result_merging_request.rb +45 -0
- data/lib/vellum_ai/types/search_result_request.rb +66 -0
- data/lib/vellum_ai/types/search_results_enum.rb +5 -0
- data/lib/vellum_ai/types/search_weights_request.rb +50 -0
- data/lib/vellum_ai/types/slim_deployment_read.rb +109 -0
- data/lib/vellum_ai/types/slim_document.rb +126 -0
- data/lib/vellum_ai/types/slim_workflow_deployment.rb +118 -0
- data/lib/vellum_ai/types/streaming_enum.rb +5 -0
- data/lib/vellum_ai/types/streaming_execute_prompt_event.rb +85 -0
- data/lib/vellum_ai/types/streaming_prompt_execution_meta.rb +46 -0
- data/lib/vellum_ai/types/streaming_workflow_node_result_event.rb +99 -0
- data/lib/vellum_ai/types/string_chat_message_content.rb +46 -0
- data/lib/vellum_ai/types/string_chat_message_content_request.rb +46 -0
- data/lib/vellum_ai/types/string_enum.rb +5 -0
- data/lib/vellum_ai/types/string_input_request.rb +51 -0
- data/lib/vellum_ai/types/string_variable_value.rb +45 -0
- data/lib/vellum_ai/types/submit_completion_actual_request.rb +67 -0
- data/lib/vellum_ai/types/submit_completion_actuals_error_response.rb +45 -0
- data/lib/vellum_ai/types/submit_workflow_execution_actual_request.rb +103 -0
- data/lib/vellum_ai/types/templating_node_chat_history_result.rb +54 -0
- data/lib/vellum_ai/types/templating_node_error_result.rb +56 -0
- data/lib/vellum_ai/types/templating_node_json_result.rb +50 -0
- data/lib/vellum_ai/types/templating_node_number_result.rb +50 -0
- data/lib/vellum_ai/types/templating_node_result.rb +52 -0
- data/lib/vellum_ai/types/templating_node_result_data.rb +51 -0
- data/lib/vellum_ai/types/templating_node_result_output.rb +142 -0
- data/lib/vellum_ai/types/templating_node_search_results_result.rb +54 -0
- data/lib/vellum_ai/types/templating_node_string_result.rb +50 -0
- data/lib/vellum_ai/types/terminal_node_chat_history_result.rb +59 -0
- data/lib/vellum_ai/types/terminal_node_error_result.rb +61 -0
- data/lib/vellum_ai/types/terminal_node_json_result.rb +55 -0
- data/lib/vellum_ai/types/terminal_node_number_result.rb +55 -0
- data/lib/vellum_ai/types/terminal_node_result.rb +52 -0
- data/lib/vellum_ai/types/terminal_node_result_data.rb +51 -0
- data/lib/vellum_ai/types/terminal_node_result_output.rb +142 -0
- data/lib/vellum_ai/types/terminal_node_search_results_result.rb +59 -0
- data/lib/vellum_ai/types/terminal_node_string_result.rb +55 -0
- data/lib/vellum_ai/types/test_case_chat_history_variable_value.rb +54 -0
- data/lib/vellum_ai/types/test_case_error_variable_value.rb +56 -0
- data/lib/vellum_ai/types/test_case_json_variable_value.rb +50 -0
- data/lib/vellum_ai/types/test_case_number_variable_value.rb +50 -0
- data/lib/vellum_ai/types/test_case_search_results_variable_value.rb +54 -0
- data/lib/vellum_ai/types/test_case_string_variable_value.rb +50 -0
- data/lib/vellum_ai/types/test_case_variable_value.rb +142 -0
- data/lib/vellum_ai/types/test_suite_test_case.rb +68 -0
- data/lib/vellum_ai/types/upload_document_error_response.rb +45 -0
- data/lib/vellum_ai/types/upload_document_response.rb +45 -0
- data/lib/vellum_ai/types/vellum_error.rb +51 -0
- data/lib/vellum_ai/types/vellum_error_code_enum.rb +10 -0
- data/lib/vellum_ai/types/vellum_error_request.rb +51 -0
- data/lib/vellum_ai/types/vellum_image.rb +50 -0
- data/lib/vellum_ai/types/vellum_image_request.rb +50 -0
- data/lib/vellum_ai/types/vellum_variable.rb +56 -0
- data/lib/vellum_ai/types/vellum_variable_type.rb +16 -0
- data/lib/vellum_ai/types/workflow_event_error.rb +51 -0
- data/lib/vellum_ai/types/workflow_execution_actual_chat_history_request.rb +77 -0
- data/lib/vellum_ai/types/workflow_execution_actual_json_request.rb +73 -0
- data/lib/vellum_ai/types/workflow_execution_actual_string_request.rb +73 -0
- data/lib/vellum_ai/types/workflow_execution_event_error_code.rb +13 -0
- data/lib/vellum_ai/types/workflow_execution_event_type.rb +6 -0
- data/lib/vellum_ai/types/workflow_execution_node_result_event.rb +68 -0
- data/lib/vellum_ai/types/workflow_execution_workflow_result_event.rb +68 -0
- data/lib/vellum_ai/types/workflow_node_result_data.rb +155 -0
- data/lib/vellum_ai/types/workflow_node_result_event.rb +116 -0
- data/lib/vellum_ai/types/workflow_node_result_event_state.rb +11 -0
- data/lib/vellum_ai/types/workflow_output.rb +168 -0
- data/lib/vellum_ai/types/workflow_output_chat_history.rb +60 -0
- data/lib/vellum_ai/types/workflow_output_error.rb +62 -0
- data/lib/vellum_ai/types/workflow_output_function_call.rb +62 -0
- data/lib/vellum_ai/types/workflow_output_image.rb +62 -0
- data/lib/vellum_ai/types/workflow_output_json.rb +56 -0
- data/lib/vellum_ai/types/workflow_output_number.rb +56 -0
- data/lib/vellum_ai/types/workflow_output_search_results.rb +60 -0
- data/lib/vellum_ai/types/workflow_output_string.rb +56 -0
- data/lib/vellum_ai/types/workflow_request_chat_history_input_request.rb +54 -0
- data/lib/vellum_ai/types/workflow_request_input_request.rb +116 -0
- data/lib/vellum_ai/types/workflow_request_json_input_request.rb +50 -0
- data/lib/vellum_ai/types/workflow_request_number_input_request.rb +50 -0
- data/lib/vellum_ai/types/workflow_request_string_input_request.rb +50 -0
- data/lib/vellum_ai/types/workflow_result_event.rb +95 -0
- data/lib/vellum_ai/types/workflow_result_event_output_data.rb +142 -0
- data/lib/vellum_ai/types/workflow_result_event_output_data_chat_history.rb +83 -0
- data/lib/vellum_ai/types/workflow_result_event_output_data_error.rb +85 -0
- data/lib/vellum_ai/types/workflow_result_event_output_data_json.rb +79 -0
- data/lib/vellum_ai/types/workflow_result_event_output_data_number.rb +79 -0
- data/lib/vellum_ai/types/workflow_result_event_output_data_search_results.rb +83 -0
- data/lib/vellum_ai/types/workflow_result_event_output_data_string.rb +79 -0
- data/lib/vellum_ai/types/workflow_stream_event.rb +90 -0
- data/lib/vellum_ai/workflow_deployments/client.rb +82 -0
- data/lib/vellum_ai/workflow_deployments/types/workflow_deployments_list_request_status.rb +8 -0
- data/lib/vellum_ai.rb +476 -0
- metadata +381 -0
@@ -0,0 +1,90 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
require "json"
|
4
|
+
require_relative "workflow_execution_workflow_result_event"
|
5
|
+
require_relative "workflow_execution_node_result_event"
|
6
|
+
|
7
|
+
module Vellum
  # Tagged union over the events emitted while streaming a Workflow
  # execution: either a workflow-level result event ("WORKFLOW") or a
  # node-level result event ("NODE").
  class WorkflowStreamEvent
    # @return [Object] the wrapped event instance
    # @return [String] the union discriminant ("WORKFLOW" or "NODE")
    attr_reader :member, :discriminant

    # Instances are only built via the .workflow / .node / .from_json factories.
    private_class_method :new
    alias kind_of? is_a?
    # @param member [Object]
    # @param discriminant [String]
    # @return [WorkflowStreamEvent]
    def initialize(member:, discriminant:)
      # @type [Object]
      @member = member
      # @type [String]
      @discriminant = discriminant
    end

    # Deserialize a JSON object to an instance of WorkflowStreamEvent
    #
    # @param json_object [JSON]
    # @return [WorkflowStreamEvent]
    def self.from_json(json_object:)
      struct = JSON.parse(json_object, object_class: OpenStruct)
      member = case struct.type
               when "WORKFLOW"
                 WorkflowExecutionWorkflowResultEvent.from_json(json_object: json_object)
               when "NODE"
                 WorkflowExecutionNodeResultEvent.from_json(json_object: json_object)
               else
                 # Unknown discriminants fall back to the workflow-level event shape.
                 WorkflowExecutionWorkflowResultEvent.from_json(json_object: json_object)
               end
      new(member: member, discriminant: struct.type)
    end

    # For Union Types, to_json functionality is delegated to the wrapped member.
    #
    # BUG FIX: the generated version built `{ **@member.to_json, type: @discriminant }`
    # inside a case statement — `#to_json` returns a JSON String (per this
    # codebase's convention), and splatting a String with `**` raises TypeError —
    # and then discarded that result anyway, unconditionally returning
    # `@member.to_json`. Plain delegation is the documented intent and the only
    # previously reachable non-crashing behavior, so the dead case is removed.
    #
    # @return [JSON]
    def to_json(*_args)
      @member.to_json
    end

    # Leveraged for Union-type generation, validate_raw attempts to parse the given hash and check each fields type against the current object's property definitions.
    #
    # @param obj [Object]
    # @return [Void]
    def self.validate_raw(obj:)
      case obj.type
      when "WORKFLOW"
        WorkflowExecutionWorkflowResultEvent.validate_raw(obj: obj)
      when "NODE"
        WorkflowExecutionNodeResultEvent.validate_raw(obj: obj)
      else
        raise("Passed value matched no type within the union, validation failed.")
      end
    end

    # For Union Types, is_a? functionality is delegated to the wrapped member.
    #
    # @param obj [Object]
    # @return [Boolean]
    def is_a?(obj)
      @member.is_a?(obj)
    end

    # @param member [WorkflowExecutionWorkflowResultEvent]
    # @return [WorkflowStreamEvent]
    def self.workflow(member:)
      new(member: member, discriminant: "WORKFLOW")
    end

    # @param member [WorkflowExecutionNodeResultEvent]
    # @return [WorkflowStreamEvent]
    def self.node(member:)
      new(member: member, discriminant: "NODE")
    end
  end
end
|
@@ -0,0 +1,82 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
require_relative "../../requests"
|
4
|
+
require_relative "types/workflow_deployments_list_request_status"
|
5
|
+
require_relative "../types/paginated_slim_workflow_deployment_list"
|
6
|
+
require "async"
|
7
|
+
|
8
|
+
module Vellum
  # Synchronous client for the Workflow Deployments API.
  class WorkflowDeploymentsClient
    # @return [RequestClient] the shared HTTP client this resource client uses
    attr_reader :request_client

    # @param request_client [RequestClient]
    # @return [WorkflowDeploymentsClient]
    def initialize(request_client:)
      # @type [RequestClient]
      @request_client = request_client
    end

    # Lists Workflow Deployments, paginated.
    #
    # @param limit [Integer] Number of results to return per page.
    # @param offset [Integer] The initial index from which to return the results.
    # @param ordering [String] Which field to use when ordering the results.
    # @param status [WORKFLOW_DEPLOYMENTS_LIST_REQUEST_STATUS] The current status of the workflow deployment
    #   - `ACTIVE` - Active
    #   - `ARCHIVED` - Archived
    # @param request_options [RequestOptions]
    # @return [PaginatedSlimWorkflowDeploymentList]
    def list(limit: nil, offset: nil, ordering: nil, status: nil, request_options: nil)
      # nil-valued filters are dropped so they never reach the query string.
      query = {
        **(request_options&.additional_query_parameters || {}),
        limit: limit,
        offset: offset,
        ordering: ordering,
        status: status
      }.compact
      response = @request_client.conn.get do |request|
        timeout = request_options&.timeout_in_seconds
        request.options.timeout = timeout unless timeout.nil?
        api_key = request_options&.api_key
        request.headers["X_API_KEY"] = api_key unless api_key.nil?
        request.headers = { **request.headers, **(request_options&.additional_headers || {}) }.compact
        request.params = query
        request.url "#{@request_client.default_environment[:Default]}/v1/workflow-deployments"
      end
      PaginatedSlimWorkflowDeploymentList.from_json(json_object: response.body)
    end
  end

  # Asynchronous variant of WorkflowDeploymentsClient; each call returns an
  # Async task that resolves to the parsed response.
  class AsyncWorkflowDeploymentsClient
    # @return [AsyncRequestClient]
    attr_reader :request_client

    # @param request_client [AsyncRequestClient]
    # @return [AsyncWorkflowDeploymentsClient]
    def initialize(request_client:)
      # @type [AsyncRequestClient]
      @request_client = request_client
    end

    # Lists Workflow Deployments, paginated.
    #
    # @param limit [Integer] Number of results to return per page.
    # @param offset [Integer] The initial index from which to return the results.
    # @param ordering [String] Which field to use when ordering the results.
    # @param status [WORKFLOW_DEPLOYMENTS_LIST_REQUEST_STATUS] The current status of the workflow deployment
    #   - `ACTIVE` - Active
    #   - `ARCHIVED` - Archived
    # @param request_options [RequestOptions]
    # @return [PaginatedSlimWorkflowDeploymentList]
    def list(limit: nil, offset: nil, ordering: nil, status: nil, request_options: nil)
      Async do
        query = {
          **(request_options&.additional_query_parameters || {}),
          limit: limit,
          offset: offset,
          ordering: ordering,
          status: status
        }.compact
        response = @request_client.conn.get do |request|
          timeout = request_options&.timeout_in_seconds
          request.options.timeout = timeout unless timeout.nil?
          api_key = request_options&.api_key
          request.headers["X_API_KEY"] = api_key unless api_key.nil?
          request.headers = { **request.headers, **(request_options&.additional_headers || {}) }.compact
          request.params = query
          request.url "#{@request_client.default_environment[:Default]}/v1/workflow-deployments"
        end
        PaginatedSlimWorkflowDeploymentList.from_json(json_object: response.body)
      end
    end
  end
end
|
data/lib/vellum_ai.rb
ADDED
@@ -0,0 +1,476 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
require_relative "environment"
|
4
|
+
require_relative "types_export"
|
5
|
+
require_relative "requests"
|
6
|
+
require_relative "vellum_ai/deployments/client"
|
7
|
+
require_relative "vellum_ai/document_indexes/client"
|
8
|
+
require_relative "vellum_ai/documents/client"
|
9
|
+
require_relative "vellum_ai/model_versions/client"
|
10
|
+
require_relative "vellum_ai/registered_prompts/client"
|
11
|
+
require_relative "vellum_ai/sandboxes/client"
|
12
|
+
require_relative "vellum_ai/test_suites/client"
|
13
|
+
require_relative "vellum_ai/workflow_deployments/client"
|
14
|
+
require_relative "vellum_ai/types/prompt_deployment_input_request"
|
15
|
+
require_relative "vellum_ai/types/prompt_deployment_expand_meta_request_request"
|
16
|
+
require_relative "vellum_ai/types/raw_prompt_execution_overrides_request"
|
17
|
+
require_relative "vellum_ai/types/execute_prompt_response"
|
18
|
+
require_relative "vellum_ai/types/workflow_request_input_request"
|
19
|
+
require_relative "vellum_ai/types/execute_workflow_response"
|
20
|
+
require_relative "vellum_ai/types/generate_request"
|
21
|
+
require_relative "vellum_ai/types/generate_options_request"
|
22
|
+
require_relative "vellum_ai/types/generate_response"
|
23
|
+
require_relative "vellum_ai/types/search_request_options_request"
|
24
|
+
require_relative "vellum_ai/types/search_response"
|
25
|
+
require_relative "vellum_ai/types/submit_completion_actual_request"
|
26
|
+
require_relative "vellum_ai/types/submit_workflow_execution_actual_request"
|
27
|
+
|
28
|
+
module Vellum
|
29
|
+
class Client
|
30
|
+
attr_reader :deployments, :document_indexes, :documents, :model_versions, :registered_prompts, :sandboxes,
|
31
|
+
:test_suites, :workflow_deployments
|
32
|
+
|
33
|
+
    # Builds the top-level Vellum client: one shared RequestClient plus one
    # resource-specific client per API surface, all reusing the same connection.
    #
    # @param environment [Environment]
    # @param max_retries [Long] The number of times to retry a failed request, defaults to 2.
    # @param timeout_in_seconds [Long]
    # @param api_key [String]
    # @return [Client]
    def initialize(api_key:, environment: Environment::PRODUCTION, max_retries: nil, timeout_in_seconds: nil)
      # Single shared HTTP client; every resource client below reuses it.
      @request_client = RequestClient.new(environment: environment, max_retries: max_retries,
                                          timeout_in_seconds: timeout_in_seconds, api_key: api_key)
      @deployments = DeploymentsClient.new(request_client: @request_client)
      @document_indexes = DocumentIndexesClient.new(request_client: @request_client)
      @documents = DocumentsClient.new(request_client: @request_client)
      @model_versions = ModelVersionsClient.new(request_client: @request_client)
      @registered_prompts = RegisteredPromptsClient.new(request_client: @request_client)
      @sandboxes = SandboxesClient.new(request_client: @request_client)
      @test_suites = TestSuitesClient.new(request_client: @request_client)
      @workflow_deployments = WorkflowDeploymentsClient.new(request_client: @request_client)
    end
|
50
|
+
|
51
|
+
    # Executes a deployed Prompt and returns the result.
    #
    # Note: This endpoint temporarily does not support prompts with function calling, support is coming soon.
    # In the meantime, we recommend still using the `/generate` endpoint for prompts with function calling.
    #
    # @param inputs [Array<Hash>] The list of inputs defined in the Prompt's deployment with their corresponding values. Request of type Array<PromptDeploymentInputRequest>, as a Hash
    # @param prompt_deployment_id [String] The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.
    # @param prompt_deployment_name [String] The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
    # @param release_tag [String] Optionally specify a release tag if you want to pin to a specific release of the Prompt Deployment
    # @param external_id [String]
    # @param expand_meta [Hash] Controls which optional metadata fields are included in the response. Request of type PromptDeploymentExpandMetaRequestRequest, as a Hash
    #   * :model_name (Boolean)
    #   * :latency (Boolean)
    #   * :deployment_release_tag (Boolean)
    #   * :prompt_version_id (Boolean)
    #   * :finish_reason (Boolean)
    # @param raw_overrides [Hash] Request of type RawPromptExecutionOverridesRequest, as a Hash
    #   * :body (Hash{String => String})
    #   * :headers (Hash{String => String})
    #   * :url (String)
    # @param expand_raw [Array<String>] Returns the raw API response data sent from the model host. Combined with `raw_overrides`, it can be used to access new features from models.
    # @param metadata [Hash{String => String}]
    # @param request_options [RequestOptions]
    # @return [ExecutePromptResponse]
    def execute_prompt(inputs:, prompt_deployment_id: nil, prompt_deployment_name: nil, release_tag: nil,
                       external_id: nil, expand_meta: nil, raw_overrides: nil, expand_raw: nil, metadata: nil, request_options: nil)
      response = @request_client.conn.post do |req|
        # Per-request options override the client-level defaults.
        req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
        req.headers["X_API_KEY"] = request_options.api_key unless request_options&.api_key.nil?
        req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
        # .compact drops nil-valued keys so optional fields are omitted entirely.
        req.body = {
          **(request_options&.additional_body_parameters || {}),
          inputs: inputs,
          prompt_deployment_id: prompt_deployment_id,
          prompt_deployment_name: prompt_deployment_name,
          release_tag: release_tag,
          external_id: external_id,
          expand_meta: expand_meta,
          raw_overrides: raw_overrides,
          expand_raw: expand_raw,
          metadata: metadata
        }.compact
        # Prompt execution is served from the Predict environment host.
        req.url "#{@request_client.default_environment[:Predict]}/v1/execute-prompt"
      end
      ExecutePromptResponse.from_json(json_object: response.body)
    end
|
97
|
+
|
98
|
+
    # Executes a deployed Workflow and returns its outputs.
    #
    # @param workflow_deployment_id [String] The ID of the Workflow Deployment. Must provide either this or workflow_deployment_name.
    # @param workflow_deployment_name [String] The name of the Workflow Deployment. Must provide either this or workflow_deployment_id.
    # @param release_tag [String] Optionally specify a release tag if you want to pin to a specific release of the Workflow Deployment
    # @param inputs [Array<Hash>] The list of inputs defined in the Workflow's Deployment with their corresponding values. Request of type Array<WorkflowRequestInputRequest>, as a Hash
    # @param external_id [String] Optionally include a unique identifier for monitoring purposes.
    # @param request_options [RequestOptions]
    # @return [ExecuteWorkflowResponse]
    def execute_workflow(inputs:, workflow_deployment_id: nil, workflow_deployment_name: nil, release_tag: nil,
                         external_id: nil, request_options: nil)
      response = @request_client.conn.post do |req|
        # Per-request options override the client-level defaults.
        req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
        req.headers["X_API_KEY"] = request_options.api_key unless request_options&.api_key.nil?
        req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
        # .compact drops nil-valued keys so optional fields are omitted entirely.
        req.body = {
          **(request_options&.additional_body_parameters || {}),
          workflow_deployment_id: workflow_deployment_id,
          workflow_deployment_name: workflow_deployment_name,
          release_tag: release_tag,
          inputs: inputs,
          external_id: external_id
        }.compact
        # Workflow execution is served from the Predict environment host.
        req.url "#{@request_client.default_environment[:Predict]}/v1/execute-workflow"
      end
      ExecuteWorkflowResponse.from_json(json_object: response.body)
    end
|
125
|
+
|
126
|
+
# Generate a completion using a previously defined deployment.
#
# **Note:** Uses a base url of `https://predict.vellum.ai`.
#
# @param deployment_id [String] The ID of the deployment. Must provide either this or deployment_name.
# @param deployment_name [String] The name of the deployment. Must provide either this or deployment_id.
# @param requests [Array<Hash>] The generation request to make. Bulk requests are no longer supported, this field must be an array of length 1.Request of type Array<GenerateRequest>, as a Hash
#   * :input_values (Hash{String => String})
#   * :chat_history (Array<ChatMessageRequest>)
#   * :external_ids (Array<String>)
# @param options [Hash] Additional configuration that can be used to control what's included in the response.Request of type GenerateOptionsRequest, as a Hash
#   * :logprobs (LOGPROBS_ENUM)
# @param request_options [RequestOptions]
# @return [GenerateResponse]
def generate(requests:, deployment_id: nil, deployment_name: nil, options: nil, request_options: nil)
  response = @request_client.conn.post do |request|
    # Per-request overrides (timeout / auth / extra headers) are applied only when supplied.
    timeout = request_options&.timeout_in_seconds
    request.options.timeout = timeout unless timeout.nil?
    api_key = request_options&.api_key
    request.headers["X_API_KEY"] = api_key unless api_key.nil?
    request.headers = { **request.headers, **(request_options&.additional_headers || {}) }.compact
    # Extra body parameters are spread first so named arguments win; nil values are dropped.
    payload = {
      **(request_options&.additional_body_parameters || {}),
      deployment_id: deployment_id,
      deployment_name: deployment_name,
      requests: requests,
      options: options
    }
    request.body = payload.compact
    request.url "#{@request_client.default_environment[:Predict]}/v1/generate"
  end
  GenerateResponse.from_json(json_object: response.body)
end
|
156
|
+
|
157
|
+
# Perform a search against a document index.
#
# **Note:** Uses a base url of `https://predict.vellum.ai`.
#
# @param index_id [String] The ID of the index to search against. Must provide either this or index_name.
# @param index_name [String] The name of the index to search against. Must provide either this or index_id.
# @param query [String] The query to search for.
# @param options [Hash] Configuration options for the search.Request of type SearchRequestOptionsRequest, as a Hash
#   * :limit (Integer)
#   * :weights (Hash)
#     * :semantic_similarity (Float)
#     * :keywords (Float)
#   * :result_merging (Hash)
#     * :enabled (Boolean)
#   * :filters (Hash)
#     * :external_ids (Array<String>)
#     * :metadata (Hash)
#       * :combinator (METADATA_FILTER_RULE_COMBINATOR)
#       * :negated (Boolean)
#       * :rules (Array<MetadataFilterRuleRequest>)
#       * :field (String)
#       * :operator (LOGICAL_OPERATOR)
#       * :value (String)
# @param request_options [RequestOptions]
# @return [SearchResponse]
def search(query:, index_id: nil, index_name: nil, options: nil, request_options: nil)
  response = @request_client.conn.post do |request|
    # Per-request overrides (timeout / auth / extra headers) are applied only when supplied.
    timeout = request_options&.timeout_in_seconds
    request.options.timeout = timeout unless timeout.nil?
    api_key = request_options&.api_key
    request.headers["X_API_KEY"] = api_key unless api_key.nil?
    request.headers = { **request.headers, **(request_options&.additional_headers || {}) }.compact
    # Extra body parameters are spread first so named arguments win; nil values are dropped.
    payload = {
      **(request_options&.additional_body_parameters || {}),
      index_id: index_id,
      index_name: index_name,
      query: query,
      options: options
    }
    request.body = payload.compact
    request.url "#{@request_client.default_environment[:Predict]}/v1/search"
  end
  SearchResponse.from_json(json_object: response.body)
end
|
198
|
+
|
199
|
+
# Used to submit feedback regarding the quality of previously generated completions.
#
# **Note:** Uses a base url of `https://predict.vellum.ai`.
#
# @param deployment_id [String] The ID of the deployment. Must provide either this or deployment_name.
# @param deployment_name [String] The name of the deployment. Must provide either this or deployment_id.
# @param actuals [Array<Hash>] Feedback regarding the quality of previously generated completionsRequest of type Array<SubmitCompletionActualRequest>, as a Hash
#   * :id (String)
#   * :external_id (String)
#   * :text (String)
#   * :quality (Float)
#   * :timestamp (DateTime)
# @param request_options [RequestOptions]
# @return [Void]
def submit_completion_actuals(actuals:, deployment_id: nil, deployment_name: nil, request_options: nil)
  # No response body is parsed: the endpoint returns no payload callers consume.
  @request_client.conn.post do |request|
    timeout = request_options&.timeout_in_seconds
    request.options.timeout = timeout unless timeout.nil?
    api_key = request_options&.api_key
    request.headers["X_API_KEY"] = api_key unless api_key.nil?
    request.headers = { **request.headers, **(request_options&.additional_headers || {}) }.compact
    # Extra body parameters are spread first so named arguments win; nil values are dropped.
    payload = {
      **(request_options&.additional_body_parameters || {}),
      deployment_id: deployment_id,
      deployment_name: deployment_name,
      actuals: actuals
    }
    request.body = payload.compact
    request.url "#{@request_client.default_environment[:Predict]}/v1/submit-completion-actuals"
  end
end
|
227
|
+
|
228
|
+
# Used to submit feedback regarding the quality of previous workflow execution and its outputs.
#
# **Note:** Uses a base url of `https://predict.vellum.ai`.
#
# @param actuals [Array<Hash>] Feedback regarding the quality of an output on a previously executed workflow.Request of type Array<SubmitWorkflowExecutionActualRequest>, as a Hash
# @param execution_id [String] The Vellum-generated ID of a previously executed workflow. Must provide either this or external_id.
# @param external_id [String] The external ID that was originally provided by when executing the workflow, if applicable, that you'd now like to submit actuals for. Must provide either this or execution_id.
# @param request_options [RequestOptions]
# @return [Void]
def submit_workflow_execution_actuals(actuals:, execution_id: nil, external_id: nil, request_options: nil)
  # No response body is parsed: the endpoint returns no payload callers consume.
  @request_client.conn.post do |request|
    timeout = request_options&.timeout_in_seconds
    request.options.timeout = timeout unless timeout.nil?
    api_key = request_options&.api_key
    request.headers["X_API_KEY"] = api_key unless api_key.nil?
    request.headers = { **request.headers, **(request_options&.additional_headers || {}) }.compact
    # Extra body parameters are spread first so named arguments win; nil values are dropped.
    payload = {
      **(request_options&.additional_body_parameters || {}),
      actuals: actuals,
      execution_id: execution_id,
      external_id: external_id
    }
    request.body = payload.compact
    request.url "#{@request_client.default_environment[:Predict]}/v1/submit-workflow-execution-actuals"
  end
end
|
251
|
+
end
|
252
|
+
|
253
|
+
# Asynchronous counterpart to the top-level Vellum client. It exposes the same
# API surface, but routes every HTTP call through an AsyncRequestClient.
class AsyncClient
  attr_reader :deployments, :document_indexes, :documents, :model_versions, :registered_prompts, :sandboxes,
              :test_suites, :workflow_deployments

  # @param environment [Environment]
  # @param max_retries [Long] The number of times to retry a failed request, defaults to 2.
  # @param timeout_in_seconds [Long]
  # @param api_key [String]
  # @return [AsyncClient]
  def initialize(api_key:, environment: Environment::PRODUCTION, max_retries: nil, timeout_in_seconds: nil)
    @async_request_client = AsyncRequestClient.new(environment: environment, max_retries: max_retries,
                                                   timeout_in_seconds: timeout_in_seconds, api_key: api_key)
    # One async sub-client per API resource, all sharing the same request client.
    @deployments = AsyncDeploymentsClient.new(request_client: @async_request_client)
    @document_indexes = AsyncDocumentIndexesClient.new(request_client: @async_request_client)
    @documents = AsyncDocumentsClient.new(request_client: @async_request_client)
    @model_versions = AsyncModelVersionsClient.new(request_client: @async_request_client)
    @registered_prompts = AsyncRegisteredPromptsClient.new(request_client: @async_request_client)
    @sandboxes = AsyncSandboxesClient.new(request_client: @async_request_client)
    @test_suites = AsyncTestSuitesClient.new(request_client: @async_request_client)
    @workflow_deployments = AsyncWorkflowDeploymentsClient.new(request_client: @async_request_client)
  end

  # Executes a deployed Prompt and returns the result.
  #
  # Note: This endpoint temporarily does not support prompts with function calling, support is coming soon.
  # In the meantime, we recommend still using the `/generate` endpoint for prompts with function calling.
  #
  # @param inputs [Array<Hash>] The list of inputs defined in the Prompt's deployment with their corresponding values.Request of type Array<PromptDeploymentInputRequest>, as a Hash
  # @param prompt_deployment_id [String] The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.
  # @param prompt_deployment_name [String] The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
  # @param release_tag [String] Optionally specify a release tag if you want to pin to a specific release of the Prompt Deployment
  # @param external_id [String]
  # @param expand_meta [Hash] Request of type PromptDeploymentExpandMetaRequestRequest, as a Hash
  #   * :model_name (Boolean)
  #   * :latency (Boolean)
  #   * :deployment_release_tag (Boolean)
  #   * :prompt_version_id (Boolean)
  #   * :finish_reason (Boolean)
  # @param raw_overrides [Hash] Request of type RawPromptExecutionOverridesRequest, as a Hash
  #   * :body (Hash{String => String})
  #   * :headers (Hash{String => String})
  #   * :url (String)
  # @param expand_raw [Array<String>] Returns the raw API response data sent from the model host. Combined with `raw_overrides`, it can be used to access new features from models.
  # @param metadata [Hash{String => String}]
  # @param request_options [RequestOptions]
  # @return [ExecutePromptResponse]
  def execute_prompt(inputs:, prompt_deployment_id: nil, prompt_deployment_name: nil, release_tag: nil,
                     external_id: nil, expand_meta: nil, raw_overrides: nil, expand_raw: nil, metadata: nil, request_options: nil)
    response = @async_request_client.conn.post do |request|
      # Per-request overrides (timeout / auth / extra headers) are applied only when supplied.
      timeout = request_options&.timeout_in_seconds
      request.options.timeout = timeout unless timeout.nil?
      api_key = request_options&.api_key
      request.headers["X_API_KEY"] = api_key unless api_key.nil?
      request.headers = { **request.headers, **(request_options&.additional_headers || {}) }.compact
      # Extra body parameters are spread first so named arguments win; nil values are dropped.
      payload = {
        **(request_options&.additional_body_parameters || {}),
        inputs: inputs,
        prompt_deployment_id: prompt_deployment_id,
        prompt_deployment_name: prompt_deployment_name,
        release_tag: release_tag,
        external_id: external_id,
        expand_meta: expand_meta,
        raw_overrides: raw_overrides,
        expand_raw: expand_raw,
        metadata: metadata
      }
      request.body = payload.compact
      request.url "#{@async_request_client.default_environment[:Predict]}/v1/execute-prompt"
    end
    ExecutePromptResponse.from_json(json_object: response.body)
  end

  # Executes a deployed Workflow and returns its outputs.
  #
  # @param workflow_deployment_id [String] The ID of the Workflow Deployment. Must provide either this or workflow_deployment_name.
  # @param workflow_deployment_name [String] The name of the Workflow Deployment. Must provide either this or workflow_deployment_id.
  # @param release_tag [String] Optionally specify a release tag if you want to pin to a specific release of the Workflow Deployment
  # @param inputs [Array<Hash>] The list of inputs defined in the Workflow's Deployment with their corresponding values.Request of type Array<WorkflowRequestInputRequest>, as a Hash
  # @param external_id [String] Optionally include a unique identifier for monitoring purposes.
  # @param request_options [RequestOptions]
  # @return [ExecuteWorkflowResponse]
  def execute_workflow(inputs:, workflow_deployment_id: nil, workflow_deployment_name: nil, release_tag: nil,
                       external_id: nil, request_options: nil)
    response = @async_request_client.conn.post do |request|
      timeout = request_options&.timeout_in_seconds
      request.options.timeout = timeout unless timeout.nil?
      api_key = request_options&.api_key
      request.headers["X_API_KEY"] = api_key unless api_key.nil?
      request.headers = { **request.headers, **(request_options&.additional_headers || {}) }.compact
      payload = {
        **(request_options&.additional_body_parameters || {}),
        workflow_deployment_id: workflow_deployment_id,
        workflow_deployment_name: workflow_deployment_name,
        release_tag: release_tag,
        inputs: inputs,
        external_id: external_id
      }
      request.body = payload.compact
      request.url "#{@async_request_client.default_environment[:Predict]}/v1/execute-workflow"
    end
    ExecuteWorkflowResponse.from_json(json_object: response.body)
  end

  # Generate a completion using a previously defined deployment.
  #
  # **Note:** Uses a base url of `https://predict.vellum.ai`.
  #
  # @param deployment_id [String] The ID of the deployment. Must provide either this or deployment_name.
  # @param deployment_name [String] The name of the deployment. Must provide either this or deployment_id.
  # @param requests [Array<Hash>] The generation request to make. Bulk requests are no longer supported, this field must be an array of length 1.Request of type Array<GenerateRequest>, as a Hash
  #   * :input_values (Hash{String => String})
  #   * :chat_history (Array<ChatMessageRequest>)
  #   * :external_ids (Array<String>)
  # @param options [Hash] Additional configuration that can be used to control what's included in the response.Request of type GenerateOptionsRequest, as a Hash
  #   * :logprobs (LOGPROBS_ENUM)
  # @param request_options [RequestOptions]
  # @return [GenerateResponse]
  def generate(requests:, deployment_id: nil, deployment_name: nil, options: nil, request_options: nil)
    response = @async_request_client.conn.post do |request|
      timeout = request_options&.timeout_in_seconds
      request.options.timeout = timeout unless timeout.nil?
      api_key = request_options&.api_key
      request.headers["X_API_KEY"] = api_key unless api_key.nil?
      request.headers = { **request.headers, **(request_options&.additional_headers || {}) }.compact
      payload = {
        **(request_options&.additional_body_parameters || {}),
        deployment_id: deployment_id,
        deployment_name: deployment_name,
        requests: requests,
        options: options
      }
      request.body = payload.compact
      request.url "#{@async_request_client.default_environment[:Predict]}/v1/generate"
    end
    GenerateResponse.from_json(json_object: response.body)
  end

  # Perform a search against a document index.
  #
  # **Note:** Uses a base url of `https://predict.vellum.ai`.
  #
  # @param index_id [String] The ID of the index to search against. Must provide either this or index_name.
  # @param index_name [String] The name of the index to search against. Must provide either this or index_id.
  # @param query [String] The query to search for.
  # @param options [Hash] Configuration options for the search.Request of type SearchRequestOptionsRequest, as a Hash
  #   * :limit (Integer)
  #   * :weights (Hash)
  #     * :semantic_similarity (Float)
  #     * :keywords (Float)
  #   * :result_merging (Hash)
  #     * :enabled (Boolean)
  #   * :filters (Hash)
  #     * :external_ids (Array<String>)
  #     * :metadata (Hash)
  #       * :combinator (METADATA_FILTER_RULE_COMBINATOR)
  #       * :negated (Boolean)
  #       * :rules (Array<MetadataFilterRuleRequest>)
  #       * :field (String)
  #       * :operator (LOGICAL_OPERATOR)
  #       * :value (String)
  # @param request_options [RequestOptions]
  # @return [SearchResponse]
  def search(query:, index_id: nil, index_name: nil, options: nil, request_options: nil)
    response = @async_request_client.conn.post do |request|
      timeout = request_options&.timeout_in_seconds
      request.options.timeout = timeout unless timeout.nil?
      api_key = request_options&.api_key
      request.headers["X_API_KEY"] = api_key unless api_key.nil?
      request.headers = { **request.headers, **(request_options&.additional_headers || {}) }.compact
      payload = {
        **(request_options&.additional_body_parameters || {}),
        index_id: index_id,
        index_name: index_name,
        query: query,
        options: options
      }
      request.body = payload.compact
      request.url "#{@async_request_client.default_environment[:Predict]}/v1/search"
    end
    SearchResponse.from_json(json_object: response.body)
  end

  # Used to submit feedback regarding the quality of previously generated completions.
  #
  # **Note:** Uses a base url of `https://predict.vellum.ai`.
  #
  # @param deployment_id [String] The ID of the deployment. Must provide either this or deployment_name.
  # @param deployment_name [String] The name of the deployment. Must provide either this or deployment_id.
  # @param actuals [Array<Hash>] Feedback regarding the quality of previously generated completionsRequest of type Array<SubmitCompletionActualRequest>, as a Hash
  #   * :id (String)
  #   * :external_id (String)
  #   * :text (String)
  #   * :quality (Float)
  #   * :timestamp (DateTime)
  # @param request_options [RequestOptions]
  # @return [Void]
  def submit_completion_actuals(actuals:, deployment_id: nil, deployment_name: nil, request_options: nil)
    # No response body is parsed: the endpoint returns no payload callers consume.
    @async_request_client.conn.post do |request|
      timeout = request_options&.timeout_in_seconds
      request.options.timeout = timeout unless timeout.nil?
      api_key = request_options&.api_key
      request.headers["X_API_KEY"] = api_key unless api_key.nil?
      request.headers = { **request.headers, **(request_options&.additional_headers || {}) }.compact
      payload = {
        **(request_options&.additional_body_parameters || {}),
        deployment_id: deployment_id,
        deployment_name: deployment_name,
        actuals: actuals
      }
      request.body = payload.compact
      request.url "#{@async_request_client.default_environment[:Predict]}/v1/submit-completion-actuals"
    end
  end

  # Used to submit feedback regarding the quality of previous workflow execution and its outputs.
  #
  # **Note:** Uses a base url of `https://predict.vellum.ai`.
  #
  # @param actuals [Array<Hash>] Feedback regarding the quality of an output on a previously executed workflow.Request of type Array<SubmitWorkflowExecutionActualRequest>, as a Hash
  # @param execution_id [String] The Vellum-generated ID of a previously executed workflow. Must provide either this or external_id.
  # @param external_id [String] The external ID that was originally provided by when executing the workflow, if applicable, that you'd now like to submit actuals for. Must provide either this or execution_id.
  # @param request_options [RequestOptions]
  # @return [Void]
  def submit_workflow_execution_actuals(actuals:, execution_id: nil, external_id: nil, request_options: nil)
    # No response body is parsed: the endpoint returns no payload callers consume.
    @async_request_client.conn.post do |request|
      timeout = request_options&.timeout_in_seconds
      request.options.timeout = timeout unless timeout.nil?
      api_key = request_options&.api_key
      request.headers["X_API_KEY"] = api_key unless api_key.nil?
      request.headers = { **request.headers, **(request_options&.additional_headers || {}) }.compact
      payload = {
        **(request_options&.additional_body_parameters || {}),
        actuals: actuals,
        execution_id: execution_id,
        external_id: external_id
      }
      request.body = payload.compact
      request.url "#{@async_request_client.default_environment[:Predict]}/v1/submit-workflow-execution-actuals"
    end
  end
end
|
476
|
+
end
|