langchainrb 0.7.5 → 0.12.0
- checksums.yaml +4 -4
- data/CHANGELOG.md +78 -0
- data/README.md +113 -56
- data/lib/langchain/assistants/assistant.rb +213 -0
- data/lib/langchain/assistants/message.rb +58 -0
- data/lib/langchain/assistants/thread.rb +34 -0
- data/lib/langchain/chunker/markdown.rb +37 -0
- data/lib/langchain/chunker/recursive_text.rb +0 -2
- data/lib/langchain/chunker/semantic.rb +1 -3
- data/lib/langchain/chunker/sentence.rb +0 -2
- data/lib/langchain/chunker/text.rb +0 -2
- data/lib/langchain/contextual_logger.rb +1 -1
- data/lib/langchain/data.rb +4 -3
- data/lib/langchain/llm/ai21.rb +1 -1
- data/lib/langchain/llm/anthropic.rb +86 -11
- data/lib/langchain/llm/aws_bedrock.rb +52 -0
- data/lib/langchain/llm/azure.rb +10 -97
- data/lib/langchain/llm/base.rb +3 -2
- data/lib/langchain/llm/cohere.rb +5 -7
- data/lib/langchain/llm/google_palm.rb +4 -2
- data/lib/langchain/llm/google_vertex_ai.rb +151 -0
- data/lib/langchain/llm/hugging_face.rb +1 -1
- data/lib/langchain/llm/llama_cpp.rb +18 -16
- data/lib/langchain/llm/mistral_ai.rb +68 -0
- data/lib/langchain/llm/ollama.rb +209 -27
- data/lib/langchain/llm/openai.rb +138 -170
- data/lib/langchain/llm/prompts/ollama/summarize_template.yaml +9 -0
- data/lib/langchain/llm/replicate.rb +1 -7
- data/lib/langchain/llm/response/anthropic_response.rb +20 -0
- data/lib/langchain/llm/response/base_response.rb +7 -0
- data/lib/langchain/llm/response/google_palm_response.rb +4 -0
- data/lib/langchain/llm/response/google_vertex_ai_response.rb +33 -0
- data/lib/langchain/llm/response/llama_cpp_response.rb +13 -0
- data/lib/langchain/llm/response/mistral_ai_response.rb +39 -0
- data/lib/langchain/llm/response/ollama_response.rb +27 -1
- data/lib/langchain/llm/response/openai_response.rb +8 -0
- data/lib/langchain/loader.rb +3 -2
- data/lib/langchain/output_parsers/base.rb +0 -4
- data/lib/langchain/output_parsers/output_fixing_parser.rb +7 -14
- data/lib/langchain/output_parsers/structured_output_parser.rb +0 -10
- data/lib/langchain/processors/csv.rb +37 -3
- data/lib/langchain/processors/eml.rb +64 -0
- data/lib/langchain/processors/markdown.rb +17 -0
- data/lib/langchain/processors/pptx.rb +29 -0
- data/lib/langchain/prompt/loading.rb +1 -1
- data/lib/langchain/tool/base.rb +21 -53
- data/lib/langchain/tool/calculator/calculator.json +19 -0
- data/lib/langchain/tool/{calculator.rb → calculator/calculator.rb} +8 -16
- data/lib/langchain/tool/database/database.json +46 -0
- data/lib/langchain/tool/database/database.rb +99 -0
- data/lib/langchain/tool/file_system/file_system.json +57 -0
- data/lib/langchain/tool/file_system/file_system.rb +32 -0
- data/lib/langchain/tool/google_search/google_search.json +19 -0
- data/lib/langchain/tool/{google_search.rb → google_search/google_search.rb} +5 -15
- data/lib/langchain/tool/ruby_code_interpreter/ruby_code_interpreter.json +19 -0
- data/lib/langchain/tool/{ruby_code_interpreter.rb → ruby_code_interpreter/ruby_code_interpreter.rb} +8 -4
- data/lib/langchain/tool/vectorsearch/vectorsearch.json +24 -0
- data/lib/langchain/tool/vectorsearch/vectorsearch.rb +36 -0
- data/lib/langchain/tool/weather/weather.json +19 -0
- data/lib/langchain/tool/{weather.rb → weather/weather.rb} +3 -15
- data/lib/langchain/tool/wikipedia/wikipedia.json +19 -0
- data/lib/langchain/tool/{wikipedia.rb → wikipedia/wikipedia.rb} +9 -9
- data/lib/langchain/utils/token_length/ai21_validator.rb +6 -2
- data/lib/langchain/utils/token_length/base_validator.rb +1 -1
- data/lib/langchain/utils/token_length/cohere_validator.rb +6 -2
- data/lib/langchain/utils/token_length/google_palm_validator.rb +5 -1
- data/lib/langchain/utils/token_length/openai_validator.rb +55 -1
- data/lib/langchain/utils/token_length/token_limit_exceeded.rb +1 -1
- data/lib/langchain/vectorsearch/base.rb +11 -4
- data/lib/langchain/vectorsearch/chroma.rb +10 -1
- data/lib/langchain/vectorsearch/elasticsearch.rb +53 -4
- data/lib/langchain/vectorsearch/epsilla.rb +149 -0
- data/lib/langchain/vectorsearch/hnswlib.rb +5 -1
- data/lib/langchain/vectorsearch/milvus.rb +4 -2
- data/lib/langchain/vectorsearch/pgvector.rb +14 -4
- data/lib/langchain/vectorsearch/pinecone.rb +8 -5
- data/lib/langchain/vectorsearch/qdrant.rb +16 -4
- data/lib/langchain/vectorsearch/weaviate.rb +20 -2
- data/lib/langchain/version.rb +1 -1
- data/lib/langchain.rb +20 -5
- metadata +182 -45
- data/lib/langchain/agent/agents.md +0 -54
- data/lib/langchain/agent/base.rb +0 -20
- data/lib/langchain/agent/react_agent/react_agent_prompt.yaml +0 -26
- data/lib/langchain/agent/react_agent.rb +0 -131
- data/lib/langchain/agent/sql_query_agent/sql_query_agent_answer_prompt.yaml +0 -11
- data/lib/langchain/agent/sql_query_agent/sql_query_agent_sql_prompt.yaml +0 -21
- data/lib/langchain/agent/sql_query_agent.rb +0 -82
- data/lib/langchain/conversation/context.rb +0 -8
- data/lib/langchain/conversation/memory.rb +0 -86
- data/lib/langchain/conversation/message.rb +0 -48
- data/lib/langchain/conversation/prompt.rb +0 -8
- data/lib/langchain/conversation/response.rb +0 -8
- data/lib/langchain/conversation.rb +0 -93
- data/lib/langchain/tool/database.rb +0 -90
data/lib/langchain/agent/sql_query_agent/sql_query_agent_answer_prompt.yaml (deleted)
@@ -1,11 +0,0 @@
-_type: prompt
-template: |
-  Given an input question and results of a SQL query, look at the results and return the answer. Use the following format:
-  Question: {question}
-  The SQL query: {sql_query}
-  Result of the SQLQuery: {results}
-  Final answer: Final answer here
-input_variables:
-  - question
-  - sql_query
-  - results
data/lib/langchain/agent/sql_query_agent/sql_query_agent_sql_prompt.yaml (deleted)
@@ -1,21 +0,0 @@
-_type: prompt
-template: |
-  Given an input question, create a syntactically correct {dialect} query to run, then return the query in valid SQL.
-  Never query for all the columns from a specific table, only ask for a the few relevant columns given the question.
-  Pay attention to use only the column names that you can see in the schema description.
-  Be careful to not query for columns that do not exist.
-  Pay attention to which column is in which table.
-  Also, qualify column names with the table name when needed.
-
-  Only use the tables listed below.
-  {schema}
-
-  Use the following format:
-
-  Question: {question}
-
-  SQLQuery:
-input_variables:
-  - dialect
-  - schema
-  - question
data/lib/langchain/agent/sql_query_agent.rb (deleted)
@@ -1,82 +0,0 @@
-# frozen_string_literal: true
-
-module Langchain::Agent
-  class SQLQueryAgent < Base
-    attr_reader :llm, :db, :schema
-
-    #
-    # Initializes the Agent
-    #
-    # @param llm [Object] The LLM client to use
-    # @param db [Object] Database connection info
-    #
-    def initialize(llm:, db:)
-      @llm = llm
-      @db = db
-      @schema = @db.dump_schema
-    end
-
-    #
-    # Ask a question and get an answer
-    #
-    # @param question [String] Question to ask the LLM/Database
-    # @return [String] Answer to the question
-    #
-    def run(question:)
-      prompt = create_prompt_for_sql(question: question)
-
-      # Get the SQL string to execute
-      Langchain.logger.info("Passing the inital prompt to the #{llm.class} LLM", for: self.class)
-      sql_string = llm.complete(prompt: prompt).completion
-
-      # Execute the SQL string and collect the results
-      Langchain.logger.info("Passing the SQL to the Database: #{sql_string}", for: self.class)
-      results = db.execute(input: sql_string)
-
-      # Pass the results and get the LLM to synthesize the answer to the question
-      Langchain.logger.info("Passing the synthesize prompt to the #{llm.class} LLM with results: #{results}", for: self.class)
-      prompt2 = create_prompt_for_answer(question: question, sql_query: sql_string, results: results)
-      llm.complete(prompt: prompt2).completion
-    end
-
-    private
-
-    # Create the initial prompt to pass to the LLM
-    # @param question[String] Question to ask
-    # @return [String] Prompt
-    def create_prompt_for_sql(question:)
-      prompt_template_sql.format(
-        dialect: "standard SQL",
-        schema: schema,
-        question: question
-      )
-    end
-
-    # Load the PromptTemplate from the YAML file
-    # @return [PromptTemplate] PromptTemplate instance
-    def prompt_template_sql
-      Langchain::Prompt.load_from_path(
-        file_path: Langchain.root.join("langchain/agent/sql_query_agent/sql_query_agent_sql_prompt.yaml")
-      )
-    end
-
-    # Create the second prompt to pass to the LLM
-    # @param question [String] Question to ask
-    # @return [String] Prompt
-    def create_prompt_for_answer(question:, sql_query:, results:)
-      prompt_template_answer.format(
-        question: question,
-        sql_query: sql_query,
-        results: results
-      )
-    end
-
-    # Load the PromptTemplate from the YAML file
-    # @return [PromptTemplate] PromptTemplate instance
-    def prompt_template_answer
-      Langchain::Prompt.load_from_path(
-        file_path: Langchain.root.join("langchain/agent/sql_query_agent/sql_query_agent_answer_prompt.yaml")
-      )
-    end
-  end
-end
data/lib/langchain/conversation/memory.rb (deleted)
@@ -1,86 +0,0 @@
-# frozen_string_literal: true
-
-module Langchain
-  class Conversation
-    class Memory
-      attr_reader :examples, :messages
-
-      # The least number of tokens we want to be under the limit by
-      TOKEN_LEEWAY = 20
-
-      def initialize(llm:, messages: [], **options)
-        @llm = llm
-        @context = nil
-        @summary = nil
-        @examples = []
-        @messages = messages
-        @strategy = options.delete(:strategy) || :truncate
-        @options = options
-      end
-
-      def set_context(message)
-        @context = message
-      end
-
-      def add_examples(examples)
-        @examples.concat examples
-      end
-
-      def append_message(message)
-        @messages.append(message)
-      end
-
-      def reduce_messages(exception)
-        case @strategy
-        when :truncate
-          truncate_messages(exception)
-        when :summarize
-          summarize_messages
-        else
-          raise "Unknown strategy: #{@options[:strategy]}"
-        end
-      end
-
-      def context
-        return if @context.nil? && @summary.nil?
-
-        Context.new([@context, @summary].compact.join("\n"))
-      end
-
-      private
-
-      def truncate_messages(exception)
-        raise exception if @messages.size == 1
-
-        token_overflow = exception.token_overflow
-
-        @messages = @messages.drop_while do |message|
-          proceed = token_overflow > -TOKEN_LEEWAY
-          token_overflow -= token_length(message.to_json, model_name, llm: @llm)
-
-          proceed
-        end
-      end
-
-      def summarize_messages
-        history = [@summary, @messages.to_json].compact.join("\n")
-        partitions = [history[0, history.size / 2], history[history.size / 2, history.size]]
-
-        @summary = partitions.map { |messages| @llm.summarize(text: messages.to_json) }.join("\n")
-
-        @messages = [@messages.last]
-      end
-
-      def partition_messages
-      end
-
-      def model_name
-        @llm.class::DEFAULTS[:chat_completion_model_name]
-      end
-
-      def token_length(content, model_name, options)
-        @llm.class::LENGTH_VALIDATOR.token_length(content, model_name, options)
-      end
-    end
-  end
-end
data/lib/langchain/conversation/message.rb (deleted)
@@ -1,48 +0,0 @@
-# frozen_string_literal: true
-
-module Langchain
-  class Conversation
-    class Message
-      attr_reader :content
-
-      ROLE_MAPPING = {
-        context: "system",
-        prompt: "user",
-        response: "assistant"
-      }
-
-      def initialize(content)
-        @content = content
-      end
-
-      def role
-        ROLE_MAPPING[type]
-      end
-
-      def to_s
-        content
-      end
-
-      def to_h
-        {
-          role: role,
-          content: content
-        }
-      end
-
-      def ==(other)
-        to_json == other.to_json
-      end
-
-      def to_json(options = {})
-        to_h.to_json
-      end
-
-      private
-
-      def type
-        self.class.to_s.split("::").last.downcase.to_sym
-      end
-    end
-  end
-end
data/lib/langchain/conversation.rb (deleted)
@@ -1,93 +0,0 @@
-# frozen_string_literal: true
-
-module Langchain
-  #
-  # A high-level API for running a conversation with an LLM.
-  # Currently supports: OpenAI and Google PaLM LLMs.
-  #
-  # Usage:
-  #     llm = Langchain::LLM::OpenAI.new(api_key: "YOUR_API_KEY")
-  #     chat = Langchain::Conversation.new(llm: llm)
-  #     chat.set_context("You are a chatbot from the future")
-  #     chat.message("Tell me about future technologies")
-  #
-  # To stream the chat response:
-  #     chat = Langchain::Conversation.new(llm: llm) do |chunk|
-  #       print(chunk)
-  #     end
-  #
-  class Conversation
-    attr_reader :options
-
-    # Intialize Conversation with a LLM
-    #
-    # @param llm [Object] The LLM to use for the conversation
-    # @param options [Hash] Options to pass to the LLM, like temperature, top_k, etc.
-    # @return [Langchain::Conversation] The Langchain::Conversation instance
-    def initialize(llm:, **options, &block)
-      @llm = llm
-      @context = nil
-      @examples = []
-      @memory = ::Langchain::Conversation::Memory.new(
-        llm: llm,
-        messages: options.delete(:messages) || [],
-        strategy: options.delete(:memory_strategy)
-      )
-      @options = options
-      @block = block
-    end
-
-    def set_functions(functions)
-      @llm.functions = functions
-    end
-
-    # Set the context of the conversation. Usually used to set the model's persona.
-    # @param message [String] The context of the conversation
-    def set_context(message)
-      @memory.set_context ::Langchain::Conversation::Context.new(message)
-    end
-
-    # Add examples to the conversation. Used to give the model a sense of the conversation.
-    # @param examples [Array<Prompt|Response>] The examples to add to the conversation
-    def add_examples(examples)
-      @memory.add_examples examples
-    end
-
-    # Message the model with a prompt and return the response.
-    # @param message [String] The prompt to message the model with
-    # @return [Response] The response from the model
-    def message(message)
-      @memory.append_message ::Langchain::Conversation::Prompt.new(message)
-      ai_message = ::Langchain::Conversation::Response.new(llm_response.chat_completion)
-      @memory.append_message(ai_message)
-      ai_message
-    end
-
-    # Messages from conversation memory
-    # @return [Array<Prompt|Response>] The messages from the conversation memory
-    def messages
-      @memory.messages
-    end
-
-    # Context from conversation memory
-    # @return [Context] Context from conversation memory
-    def context
-      @memory.context
-    end
-
-    # Examples from conversation memory
-    # @return [Array<Prompt|Response>] Examples from the conversation memory
-    def examples
-      @memory.examples
-    end
-
-    private
-
-    def llm_response
-      @llm.chat(messages: @memory.messages.map(&:to_h), context: @memory.context&.to_s, examples: @memory.examples.map(&:to_h), **@options, &@block)
-    rescue Langchain::Utils::TokenLength::TokenLimitExceeded => exception
-      @memory.reduce_messages(exception)
-      retry
-    end
-  end
-end
data/lib/langchain/tool/database.rb (deleted)
@@ -1,90 +0,0 @@
-module Langchain::Tool
-  class Database < Base
-    #
-    # Connects to a database, executes SQL queries, and outputs DB schema for Agents to use
-    #
-    # Gem requirements: gem "sequel", "~> 5.68.0"
-    #
-
-    NAME = "database"
-
-    description <<~DESC
-      Useful for getting the result of a database query.
-
-      The input to this tool should be valid SQL.
-    DESC
-
-    attr_reader :db, :requested_tables, :excluded_tables
-
-    #
-    # Establish a database connection
-    #
-    # @param connection_string [String] Database connection info, e.g. 'postgres://user:password@localhost:5432/db_name'
-    # @param tables [Array<Symbol>] The tables to use. Will use all if empty.
-    # @param except_tables [Array<Symbol>] The tables to exclude. Will exclude none if empty.
-
-    # @return [Database] Database object
-    #
-    def initialize(connection_string:, tables: [], exclude_tables: [])
-      depends_on "sequel"
-
-      raise StandardError, "connection_string parameter cannot be blank" if connection_string.empty?
-
-      @db = Sequel.connect(connection_string)
-      @requested_tables = tables
-      @excluded_tables = exclude_tables
-    end
-
-    #
-    # Returns the database schema
-    #
-    # @return [String] schema
-    #
-    def dump_schema
-      Langchain.logger.info("Dumping schema tables and keys", for: self.class)
-      schema = ""
-      db.tables.each do |table|
-        next if excluded_tables.include?(table)
-        next unless requested_tables.empty? || requested_tables.include?(table)
-
-        primary_key_columns = []
-        primary_key_column_count = db.schema(table).count { |column| column[1][:primary_key] == true }
-
-        schema << "CREATE TABLE #{table}(\n"
-        db.schema(table).each do |column|
-          schema << "#{column[0]} #{column[1][:type]}"
-          if column[1][:primary_key] == true
-            schema << " PRIMARY KEY" if primary_key_column_count == 1
-          else
-            primary_key_columns << column[0]
-          end
-          schema << ",\n" unless column == db.schema(table).last && primary_key_column_count == 1
-        end
-        if primary_key_column_count > 1
-          schema << "PRIMARY KEY (#{primary_key_columns.join(",")})"
-        end
-        db.foreign_key_list(table).each do |fk|
-          schema << ",\n" if fk == db.foreign_key_list(table).first
-          schema << "FOREIGN KEY (#{fk[:columns][0]}) REFERENCES #{fk[:table]}(#{fk[:key][0]})"
-          schema << ",\n" unless fk == db.foreign_key_list(table).last
-        end
-        schema << ");\n"
-      end
-      schema
-    end
-
-    #
-    # Evaluates a sql expression
-    #
-    # @param input [String] sql expression
-    # @return [Array] results
-    #
-    def execute(input:)
-      Langchain.logger.info("Executing \"#{input}\"", for: self.class)
-
-      db[input].to_a
-    rescue Sequel::DatabaseError => e
-      Langchain.logger.error(e.message, for: self.class)
-    end
-  end
-end