langchainrb 0.2.0 → 0.3.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: c43d156af4f33487e653c0f72d0a27a327ebe07b1ecc7558409b8d877854318c
- data.tar.gz: dba1509aa4494e1a35a08a40ca872bcb9f0b3ad03b7f17c6e8c3716c7d5ce805
+ metadata.gz: 1d43d7b8fab03608be188730dd7e67947e161176ea11ad29a3d6a3b3469045da
+ data.tar.gz: 2a0cf937f20dcb620fde4fac5bfdd1cbafee92d1212ab61fc25db352c3bce79f
  SHA512:
- metadata.gz: b800cf71cb8c9193082383d9994f1f40f61d7479e1db57ad970a089dd26e749be684f8f17b388a704aaba595909acb5babe9fa4ab37836953f0d11dbc55efa66
- data.tar.gz: 9fdb6337593f3fa5902af104255a3e9f9a06214965b8dcb026d3b914094804b1d546c7b46c0e923d8327a216639ee4eb64e0febeafa9616c73d3706d74032037
+ metadata.gz: 2eb42e6ea5ee796bd7b78addd9c3cc75449ca84a680c0b50668eb678e14cb4991fe38577a323acbd628b30b3753a2197da8a34ef908dac15402277e078a8c287
+ data.tar.gz: 1a1d51903ef37908b2f0f5a2ded0542a04f5e812ff4c2fa37b67dbaa80636d50a70d7cf8bf05bfceedbe21a016d6dcb6e98a9fa3e25981feb05f4a4cec2ca52f
data/CHANGELOG.md CHANGED
@@ -1,4 +1,11 @@
  ## [Unreleased]
+ ## [0.3.0] - 2023-05-12
+
+ - Agents
+   - Introducing `Agent::ChainOfThoughtAgent`, a semi-autonomous bot that uses Tools to retrieve additional information in order to make best-effort, informed replies to users' questions.
+ - Tools
+   - Introducing the `Tool::Calculator` tool, which solves mathematical expressions.
+   - Introducing the `Tool::Search` tool, which executes Google searches.
 
  ## [0.2.0] - 2023-05-09
 
data/Gemfile.lock CHANGED
@@ -1,8 +1,10 @@
  PATH
    remote: .
    specs:
-     langchainrb (0.2.0)
-       cohere-ruby (~> 0.9.1)
+     langchainrb (0.3.0)
+       cohere-ruby (~> 0.9.3)
+       eqn (~> 1.6.5)
+       google_search_results (~> 2.0.0)
        milvus (~> 0.9.0)
        pinecone (~> 0.1.6)
        qdrant-ruby (~> 0.9.0)
@@ -33,8 +35,9 @@ GEM
      builder (3.2.4)
      byebug (11.1.3)
      coderay (1.1.3)
-     cohere-ruby (0.9.1)
-       faraday (~> 2.7.0)
+     cohere-ruby (0.9.3)
+       faraday (~> 1)
+       faraday_middleware (~> 1)
      concurrent-ruby (1.2.2)
      crass (1.0.6)
      diff-lcs (1.5.0)
@@ -79,15 +82,38 @@ GEM
        dry-initializer (~> 3.0)
        dry-schema (>= 1.12, < 2)
        zeitwerk (~> 2.6)
+     eqn (1.6.5)
+       treetop (>= 1.2.0)
      erubi (1.12.0)
-     faraday (2.7.4)
-       faraday-net_http (>= 2.0, < 3.1)
+     faraday (1.10.3)
+       faraday-em_http (~> 1.0)
+       faraday-em_synchrony (~> 1.0)
+       faraday-excon (~> 1.1)
+       faraday-httpclient (~> 1.0)
+       faraday-multipart (~> 1.0)
+       faraday-net_http (~> 1.0)
+       faraday-net_http_persistent (~> 1.0)
+       faraday-patron (~> 1.0)
+       faraday-rack (~> 1.0)
+       faraday-retry (~> 1.0)
        ruby2_keywords (>= 0.0.4)
+     faraday-em_http (1.0.0)
+     faraday-em_synchrony (1.0.0)
+     faraday-excon (1.1.0)
+     faraday-httpclient (1.0.1)
      faraday-multipart (1.0.4)
        multipart-post (~> 2)
-     faraday-net_http (3.0.2)
-     graphlient (0.7.0)
-       faraday (~> 2.0)
+     faraday-net_http (1.0.1)
+     faraday-net_http_persistent (1.2.0)
+     faraday-patron (1.0.0)
+     faraday-rack (1.0.0)
+     faraday-retry (1.0.3)
+     faraday_middleware (1.2.0)
+       faraday (~> 1.0)
+     google_search_results (2.0.1)
+     graphlient (0.6.0)
+       faraday (>= 1.0)
+       faraday_middleware
        graphql-client
      graphql (2.0.21)
      graphql-client (0.18.0)
@@ -99,12 +125,12 @@ GEM
      i18n (1.13.0)
        concurrent-ruby (~> 1.0)
      ice_nine (0.11.2)
-     loofah (2.20.0)
+     loofah (2.21.1)
        crass (~> 1.0.2)
        nokogiri (>= 1.5.9)
      method_source (1.0.0)
-     milvus (0.9.0)
-       faraday (~> 2.7.0)
+     milvus (0.9.1)
+       faraday (~> 1)
      mini_mime (1.1.2)
      minitest (5.18.0)
      multi_xml (0.6.0)
@@ -117,14 +143,16 @@ GEM
        dry-struct (~> 1.6.0)
        dry-validation (~> 1.10.0)
        httparty (~> 0.21.0)
+     polyglot (0.3.5)
      pry (0.14.2)
        coderay (~> 1.1)
        method_source (~> 1.0)
      pry-byebug (3.10.1)
        byebug (~> 11.0)
        pry (>= 0.13, < 0.15)
-     qdrant-ruby (0.9.0)
-       faraday (~> 2.7)
+     qdrant-ruby (0.9.2)
+       faraday (~> 1)
+       faraday_middleware (~> 1)
      racc (1.6.2)
      rack (2.2.7)
      rack-test (2.1.0)
@@ -160,11 +188,14 @@ GEM
        faraday-multipart (>= 1)
      ruby2_keywords (0.0.5)
      thor (1.2.1)
+     treetop (1.6.12)
+       polyglot (~> 0.3)
      tzinfo (2.0.6)
        concurrent-ruby (~> 1.0)
-     weaviate-ruby (0.8.0)
-       faraday (~> 2.7)
-       graphlient (~> 0.7.0)
+     weaviate-ruby (0.8.1)
+       faraday (~> 1)
+       faraday_middleware (~> 1)
+       graphlient (~> 0.6.0)
      zeitwerk (2.6.8)
 
  PLATFORMS
data/README.md CHANGED
@@ -26,7 +26,7 @@ If bundler is not being used to manage dependencies, install the gem by executin
  require "langchain"
  ```
 
- List of currently supported vector search databases and features:
+ #### Supported vector search databases and features:
 
  | Database | Querying | Storage | Schema Management | Backups | Rails Integration | ??? |
  | -------- |:------------------:| -------:| -----------------:| -------:| -----------------:| ---:|
@@ -35,7 +35,7 @@ List of currently supported vector search databases and features:
  | Milvus | :white_check_mark: | WIP | WIP | WIP | | |
  | Pinecone | :white_check_mark: | WIP | WIP | WIP | | |
 
- ### Using Vector Search Databases
+ ### Using Vector Search Databases 🔍
 
  Choose the LLM provider you'll be using (OpenAI or Cohere) and retrieve the API key.
 
@@ -90,7 +90,7 @@ client.ask(
  )
  ```
 
- ### Using Standalone LLMs
+ ### Using Standalone LLMs 🗣️
 
  #### OpenAI
  ```ruby
@@ -114,7 +114,7 @@ cohere.embed(text: "foo bar")
  cohere.complete(prompt: "What is the meaning of life?")
  ```
 
- ### Using Prompts
+ ### Using Prompts 📋
 
  #### Prompt Templates
 
@@ -199,16 +199,49 @@ prompt = Prompt.load_from_path(file_path: "spec/fixtures/prompt/few_shot_prompt_
  prompt.prefix # "Write antonyms for the following words."
  ```
 
+ ### Using Agents 🤖
+ Agents are semi-autonomous bots that can respond to user questions and use the Tools available to them to provide informed replies. They break a problem down into a series of steps and define Actions (and Action Inputs) along the way, which are executed and fed back to them as additional information. Once an Agent decides that it has the Final Answer, it responds with it.
+
+ #### Chain-of-Thought Agent
+
+ ```ruby
+ agent = Agent::ChainOfThoughtAgent.new(llm: :openai, llm_api_key: ENV["OPENAI_API_KEY"], tools: ['search', 'calculator'])
+
+ agent.tools
+ # => ["search", "calculator"]
+ ```
+ ```ruby
+ agent.run(question: "How many full soccer fields would be needed to cover the distance between NYC and DC in a straight line?", logging: true)
+ #=> "Approximately 2,945 soccer fields would be needed to cover the distance between NYC and DC in a straight line."
+ ```
+
+ #### Demo
+ ![May-12-2023 13-09-13](https://github.com/andreibondarev/langchainrb/assets/541665/6bad4cd9-976c-420f-9cf9-b85bf84f7eaf)
+
+ ![May-12-2023 13-07-45](https://github.com/andreibondarev/langchainrb/assets/541665/9aacdcc7-4225-4ea0-ab96-7ee48826eb9b)
+
+ #### Available Tools 🛠️
+
+ | Name | Description | Requirements |
+ | -------- | :------------------: | :------------------: |
+ | "search" | A wrapper around Google Search | `ENV["SERP_API_KEY"]` (https://serpapi.com/manage-api-key) |
+ | "calculator" | Useful for getting the result of a math expression | |
+
  ## Development
 
  After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake spec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
 
  To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and the created tag, and push the `.gem` file to [rubygems.org](https://rubygems.org).
 
+ ## Core Contributors
+ [<img style="border-radius:50%" alt="Andrei Bondarev" src="https://avatars.githubusercontent.com/u/541665?v=4" width="80" height="80" class="avatar">](https://github.com/andreibondarev)
+
  ## Honorary Contributors
  [<img style="border-radius:50%" alt="Andrei Bondarev" src="https://avatars.githubusercontent.com/u/541665?v=4" width="80" height="80" class="avatar">](https://github.com/andreibondarev)
  [<img style="border-radius:50%" alt="Rafael Figueiredo" src="https://avatars.githubusercontent.com/u/35845775?v=4" width="80" height="80" class="avatar">](https://github.com/rafaelqfigueiredo)
 
+ (Criteria for becoming an Honorary Contributor or Core Contributor are pending...)
+
  ## Contributing
 
  Bug reports and pull requests are welcome on GitHub at https://github.com/andreibondarev/langchain.
data/lib/agent/base.rb ADDED
@@ -0,0 +1,6 @@
+ # frozen_string_literal: true
+
+ module Agent
+   class Base
+   end
+ end
data/lib/agent/chain_of_thought_agent/chain_of_thought_agent.rb ADDED
@@ -0,0 +1,108 @@
+ # frozen_string_literal: true
+
+ module Agent
+   class ChainOfThoughtAgent < Base
+     attr_reader :llm, :llm_api_key, :llm_client, :tools
+
+     # Initializes the Agent
+     #
+     # @param llm [Symbol] The LLM to use
+     # @param llm_api_key [String] The API key for the LLM
+     # @param tools [Array] The tools to use
+     # @return [ChainOfThoughtAgent] The Agent::ChainOfThoughtAgent instance
+     def initialize(llm:, llm_api_key:, tools: [])
+       LLM::Base.validate_llm!(llm: llm)
+       Tool::Base.validate_tools!(tools: tools)
+
+       @llm = llm
+       @llm_api_key = llm_api_key
+       @tools = tools
+
+       @llm_client = LLM.const_get(LLM::Base::LLMS.fetch(llm)).new(api_key: llm_api_key)
+     end
+
+     # Validate tools when they're re-assigned
+     #
+     # @param value [Array] The tools to use
+     # @return [Array] The tools that will be used
+     def tools=(value)
+       Tool::Base.validate_tools!(tools: value)
+       @tools = value
+     end
+
+     # Run the Agent!
+     #
+     # @param question [String] The question to ask
+     # @param logging [Boolean] Whether or not to log the Agent's actions
+     # @return [String] The answer to the question
+     def run(question:, logging: false)
+       question = question.strip
+       prompt = create_prompt(
+         question: question,
+         tools: tools
+       )
+
+       loop do
+         puts("Agent: Passing the prompt to the #{llm} LLM") if logging
+         response = llm_client.generate_completion(
+           prompt: prompt,
+           stop_sequences: ["Observation:"],
+           max_tokens: 500
+         )
+
+         # Append the response to the prompt
+         prompt += response
+
+         # Find the requested action in the "Action: search" format
+         action = response.match(/Action: (.*)/)&.send(:[], -1)
+
+         if action
+           # Find the input to the action in the "Action Input: [action_input]" format
+           action_input = response.match(/Action Input: "?(.*)"?/)&.send(:[], -1)
+
+           puts("Agent: Using the \"#{action}\" Tool with \"#{action_input}\"") if logging
+
+           # Retrieve the Tool::[ToolName] class and call `execute` with action_input as the input
+           result = Tool
+             .const_get(Tool::Base::TOOLS[action.strip])
+             .execute(input: action_input)
+
+           # Append the Observation to the prompt
+           if prompt.end_with?("Observation:")
+             prompt += " #{result}\nThought:"
+           else
+             prompt += "\nObservation: #{result}\nThought:"
+           end
+         else
+           # Return the final answer
+           break response.match(/Final Answer: (.*)/)&.send(:[], -1)
+         end
+       end
+     end
+
+     private
+
+     # Create the initial prompt to pass to the LLM
+     # @param question [String] Question to ask
+     # @param tools [Array] Tools to use
+     # @return [String] Prompt
+     def create_prompt(question:, tools:)
+       prompt_template.format(
+         date: Date.today.strftime("%B %d, %Y"),
+         question: question,
+         tool_names: "[#{tools.join(", ")}]",
+         tools: tools.map do |tool|
+           "#{tool}: #{Tool.const_get(Tool::Base::TOOLS[tool]).const_get("DESCRIPTION")}"
+         end.join("\n")
+       )
+     end
+
+     # Load the PromptTemplate from the JSON file
+     # @return [PromptTemplate] PromptTemplate instance
+     def prompt_template
+       @template ||= Prompt.load_from_path(
+         file_path: "lib/agent/chain_of_thought_agent/chain_of_thought_agent_prompt.json"
+       )
+     end
+   end
+ end
data/lib/agent/chain_of_thought_agent/chain_of_thought_agent_prompt.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "_type": "prompt",
+   "template": "Today is {date} and you can use tools to get new information. Answer the following questions as best you can using the following tools:\n\n{tools}\n\nUse the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of {tool_names}\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n\nBegin!\n\nQuestion: {question}\nThought:",
+   "input_variables": [
+     "date",
+     "question",
+     "tools",
+     "tool_names"
+   ]
+ }
data/lib/langchain.rb CHANGED
@@ -2,6 +2,11 @@
  require_relative "./version"
 
+ module Agent
+   autoload :Base, "agent/base"
+   autoload :ChainOfThoughtAgent, "agent/chain_of_thought_agent/chain_of_thought_agent.rb"
+ end
+
  module Vectorsearch
    autoload :Base, "vectorsearch/base"
    autoload :Milvus, "vectorsearch/milvus"
@@ -22,4 +27,10 @@ module Prompt
    autoload :Base, "prompt/base"
    autoload :PromptTemplate, "prompt/prompt_template"
    autoload :FewShotPromptTemplate, "prompt/few_shot_prompt_template"
- end
+ end
+
+ module Tool
+   autoload :Base, "tool/base"
+   autoload :Calculator, "tool/calculator"
+   autoload :SerpApi, "tool/serp_api"
+ end
data/lib/llm/base.rb CHANGED
@@ -14,5 +14,14 @@ module LLM
      def default_dimension
        self.class.const_get("DEFAULTS").dig(:dimension)
      end
+
+     # Ensure that the LLM value passed in is supported
+     # @param llm [Symbol] The LLM to use
+     def self.validate_llm!(llm:)
+       # TODO: Fix so this works when `llm` value is a string instead of a symbol
+       unless LLM::Base::LLMS.keys.include?(llm)
+         raise ArgumentError, "LLM must be one of #{LLM::Base::LLMS.keys}"
+       end
+     end
    end
  end
data/lib/llm/cohere.rb CHANGED
@@ -30,12 +30,20 @@ module LLM
      # Generate a completion for a given prompt
      # @param prompt [String] The prompt to generate a completion for
      # @return [Hash] The completion
-     def complete(prompt:)
-       response = client.generate(
+     def complete(prompt:, **params)
+       default_params = {
          prompt: prompt,
          temperature: DEFAULTS[:temperature],
-         model: DEFAULTS[:completion_model_name],
-       )
+         model: DEFAULTS[:completion_model_name]
+       }
+
+       if params[:stop_sequences]
+         default_params[:stop_sequences] = params.delete(:stop_sequences)
+       end
+
+       default_params.merge!(params)
+
+       response = client.generate(**default_params)
        response.dig("generations").first.dig("text")
      end
 
data/lib/llm/openai.rb CHANGED
@@ -33,15 +33,21 @@ module LLM
      # Generate a completion for a given prompt
      # @param prompt [String] The prompt to generate a completion for
      # @return [String] The completion
-     def complete(prompt:)
-       response = client.completions(
-         parameters: {
-           model: DEFAULTS[:completion_model_name],
-           temperature: DEFAULTS[:temperature],
-           prompt: prompt
-         }
-       )
-       response.dig("choices").first.dig("text")
+     def complete(prompt:, **params)
+       default_params = {
+         model: DEFAULTS[:completion_model_name],
+         temperature: DEFAULTS[:temperature],
+         prompt: prompt
+       }
+
+       if params[:stop_sequences]
+         default_params[:stop] = params.delete(:stop_sequences)
+       end
+
+       default_params.merge!(params)
+
+       response = client.completions(parameters: default_params)
+       response.dig("choices", 0, "text")
      end
 
      alias_method :generate_completion, :complete
data/lib/tool/base.rb ADDED
@@ -0,0 +1,38 @@
+ # frozen_string_literal: true
+
+ module Tool
+   class Base
+     # How to add additional Tools?
+     # 1. Create a new file in lib/tool/your_tool_name.rb
+     # 2. Add your tool to the TOOLS hash below
+     #    "your_tool_name" => "Tool::YourToolName"
+     # 3. Implement the `self.execute(input:)` method in your tool class
+     # 4. Add your tool to the README.md
+
+     TOOLS = {
+       "calculator" => "Tool::Calculator",
+       "search" => "Tool::SerpApi"
+     }
+
+     # Executes the tool and returns the answer
+     # @param input [String] input to the tool
+     # @return [String] answer
+     def self.execute(input:)
+       raise NotImplementedError, "Your tool must implement the `self.execute(input:)` method that returns a string"
+     end
+
+     #
+     # Validates that all of the listed tools (strings) are supported, or raises an error
+     # @param tools [Array<String>] list of tools to be used
+     #
+     # @raise [ArgumentError] If any of the tools are not supported
+     #
+     def self.validate_tools!(tools:)
+       unrecognized_tools = tools - Tool::Base::TOOLS.keys
+
+       if unrecognized_tools.any?
+         raise ArgumentError, "Unrecognized Tools: #{unrecognized_tools}"
+       end
+     end
+   end
+ end
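Following the numbered steps in the comment above, a hypothetical custom tool could look roughly like this (the `Weather` class and its behaviour are illustrative only, not part of this release):

```ruby
# lib/tool/weather.rb (hypothetical)
# frozen_string_literal: true

module Tool
  class Weather < Base
    DESCRIPTION = "Useful for looking up the current weather for a city. " +
      "Input should be a city name."

    # Step 3: implement `self.execute(input:)` and return a String for the Agent to observe.
    def self.execute(input:)
      "It is currently sunny in #{input}" # replace with a real weather API call
    end
  end
end

# Step 2: register it in Tool::Base::TOOLS ("weather" => "Tool::Weather") so that
# Tool::Base.validate_tools!(tools: ["weather"]) accepts it.
```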
data/lib/tool/calculator.rb ADDED
@@ -0,0 +1,23 @@
+ # frozen_string_literal: true
+
+ require "eqn"
+
+ module Tool
+   class Calculator < Base
+     DESCRIPTION = "Useful for getting the result of a math expression. " +
+       "The input to this tool should be a valid mathematical expression that could be executed by a simple calculator."
+
+     # Evaluates a pure math expression, or, if the expression contains non-math characters (e.g.: "12F in Celsius"),
+     # uses the Google Search calculator to evaluate it
+     # @param input [String] math expression
+     # @return [String] Answer
+     def self.execute(input:)
+       Eqn::Calculator.calc(input)
+     rescue Eqn::ParseError, Eqn::NoVariableValueError
+       # Sometimes the input is not a pure math expression, e.g.: "12F in Celsius"
+       # We can use the Google answer box to evaluate this expression
+       hash_results = Tool::SerpApi.execute_search(input: input)
+       hash_results.dig(:answer_box, :to)
+     end
+   end
+ end
data/lib/tool/serp_api.rb ADDED
@@ -0,0 +1,36 @@
+ # frozen_string_literal: true
+
+ require "google_search_results"
+
+ module Tool
+   class SerpApi < Base
+     DESCRIPTION = "A wrapper around Google Search. " +
+       "Useful for when you need to answer questions about current events. " +
+       "Always one of the first options when you need to find information on the internet. " +
+       "Input should be a search query."
+
+     # Executes Google Search and returns hash_results JSON
+     # @param input [String] search query
+     # @return [String] Answer
+     # TODO: Glance at all of the fields that langchain Python looks through: https://github.com/hwchase17/langchain/blob/v0.0.166/langchain/utilities/serpapi.py#L128-L156
+     # We may need to do the same thing here.
+     def self.execute(input:)
+       hash_results = self.execute_search(input: input)
+
+       hash_results.dig(:answer_box, :answer) ||
+         hash_results.dig(:answer_box, :snippet) ||
+         hash_results.dig(:organic_results, 0, :snippet)
+     end
+
+     # Executes Google Search and returns hash_results JSON
+     # @param input [String] search query
+     # @return [Hash] hash_results JSON
+     def self.execute_search(input:)
+       GoogleSearch.new(
+         q: input,
+         serp_api_key: ENV["SERP_API_KEY"]
+       )
+         .get_hash
+     end
+   end
+ end
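The Agent resolves these classes through `Tool::Base::TOOLS`, but they can also be exercised directly, for example (inputs are illustrative, and the search call needs `ENV["SERP_API_KEY"]` to be set):

```ruby
# Evaluates the expression with the eqn gem.
Tool::Calculator.execute(input: "2 + 2 * 3")

# Hits SerpApi and returns the answer box or the first organic snippet.
Tool::SerpApi.execute(input: "Who won the 2022 FIFA World Cup?")
```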
data/lib/vectorsearch/base.rb CHANGED
@@ -11,7 +11,7 @@ module Vectorsearch
      # @param llm [Symbol] The LLM to use
      # @param llm_api_key [String] The API key for the LLM
      def initialize(llm:, llm_api_key:)
-       validate_llm!(llm: llm)
+       LLM::Base.validate_llm!(llm: llm)
 
        @llm = llm
        @llm_api_key = llm_api_key
@@ -54,14 +54,5 @@ module Vectorsearch
 
        prompt_template.format(question: question)
      end
-
-     private
-
-     def validate_llm!(llm:)
-       # TODO: Fix so this works when `llm` value is a string instead of a symbol
-       unless LLM::Base::LLMS.keys.include?(llm)
-         raise ArgumentError, "LLM must be one of #{LLM::Base::LLMS.keys}"
-       end
-     end
    end
  end
data/lib/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true
 
  module Langchain
-   VERSION = "0.2.0"
+   VERSION = "0.3.0"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: langchainrb
  version: !ruby/object:Gem::Version
-   version: 0.2.0
+   version: 0.3.0
  platform: ruby
  authors:
  - Andrei Bondarev
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2023-05-09 00:00:00.000000000 Z
+ date: 2023-05-12 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: pry-byebug
@@ -44,14 +44,28 @@ dependencies:
      requirements:
      - - "~>"
        - !ruby/object:Gem::Version
-         version: 0.9.1
+         version: 0.9.3
    type: :runtime
    prerelease: false
    version_requirements: !ruby/object:Gem::Requirement
      requirements:
      - - "~>"
        - !ruby/object:Gem::Version
-         version: 0.9.1
+         version: 0.9.3
+ - !ruby/object:Gem::Dependency
+   name: eqn
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 1.6.5
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 1.6.5
  - !ruby/object:Gem::Dependency
    name: milvus
    requirement: !ruby/object:Gem::Requirement
@@ -108,6 +122,20 @@ dependencies:
      - - "~>"
        - !ruby/object:Gem::Version
          version: 0.9.0
+ - !ruby/object:Gem::Dependency
+   name: google_search_results
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 2.0.0
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 2.0.0
  - !ruby/object:Gem::Dependency
    name: weaviate-ruby
    requirement: !ruby/object:Gem::Requirement
@@ -142,6 +170,9 @@ files:
  - examples/store_and_query_with_pinecone.rb
  - examples/store_and_query_with_qdrant.rb
  - examples/store_and_query_with_weaviate.rb
+ - lib/agent/base.rb
+ - lib/agent/chain_of_thought_agent/chain_of_thought_agent.rb
+ - lib/agent/chain_of_thought_agent/chain_of_thought_agent_prompt.json
  - lib/langchain.rb
  - lib/llm/base.rb
  - lib/llm/cohere.rb
@@ -150,6 +181,9 @@ files:
  - lib/prompt/few_shot_prompt_template.rb
  - lib/prompt/loading.rb
  - lib/prompt/prompt_template.rb
+ - lib/tool/base.rb
+ - lib/tool/calculator.rb
+ - lib/tool/serp_api.rb
  - lib/vectorsearch/base.rb
  - lib/vectorsearch/milvus.rb
  - lib/vectorsearch/pinecone.rb