langchainrb 0.1.4 → 0.3.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/CHANGELOG.md +19 -1
- data/Gemfile.lock +48 -17
- data/README.md +125 -3
- data/examples/create_and_manage_few_shot_prompt_templates.rb +36 -0
- data/examples/create_and_manage_prompt_templates.rb +21 -0
- data/lib/agent/base.rb +6 -0
- data/lib/agent/chain_of_thought_agent/chain_of_thought_agent.rb +108 -0
- data/lib/agent/chain_of_thought_agent/chain_of_thought_agent_prompt.json +10 -0
- data/lib/langchain.rb +20 -1
- data/lib/llm/base.rb +9 -0
- data/lib/llm/cohere.rb +12 -4
- data/lib/llm/openai.rb +15 -9
- data/lib/prompt/base.rb +86 -0
- data/lib/prompt/few_shot_prompt_template.rb +73 -0
- data/lib/prompt/loading.rb +87 -0
- data/lib/prompt/prompt_template.rb +62 -0
- data/lib/tool/base.rb +38 -0
- data/lib/tool/calculator.rb +23 -0
- data/lib/tool/serp_api.rb +36 -0
- data/lib/vectorsearch/base.rb +16 -16
- data/lib/version.rb +1 -1
- metadata +44 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 1d43d7b8fab03608be188730dd7e67947e161176ea11ad29a3d6a3b3469045da
|
4
|
+
data.tar.gz: 2a0cf937f20dcb620fde4fac5bfdd1cbafee92d1212ab61fc25db352c3bce79f
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 2eb42e6ea5ee796bd7b78addd9c3cc75449ca84a680c0b50668eb678e14cb4991fe38577a323acbd628b30b3753a2197da8a34ef908dac15402277e078a8c287
|
7
|
+
data.tar.gz: 1a1d51903ef37908b2f0f5a2ded0542a04f5e812ff4c2fa37b67dbaa80636d50a70d7cf8bf05bfceedbe21a016d6dcb6e98a9fa3e25981feb05f4a4cec2ca52f
|
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,23 @@
|
|
1
1
|
## [Unreleased]
|
2
|
+
## [0.3.0] - 2023-05-12
|
2
3
|
|
3
|
-
|
4
|
+
- Agents
|
5
|
+
- Introducing `Agent::ChainOfThoughtAgent`, a semi-autonomous bot that uses Tools to retrieve additional information in order to make best-effort informed replies to user's questions.
|
6
|
+
- Tools
|
7
|
+
- Introducing `Tool::Calculator` tool that solves mathematical expressions.
|
8
|
+
- Introducing `Tool::Search` tool that executes Google Searches.
|
9
|
+
|
10
|
+
## [0.2.0] - 2023-05-09
|
11
|
+
|
12
|
+
- Prompt Templating
|
13
|
+
- Ability to create prompt templates and save them to JSON files
|
14
|
+
- Default `Prompt::FewShotPromptTemplate`
|
15
|
+
- New examples added to `examples/`
|
16
|
+
|
17
|
+
## [0.1.4] - 2023-05-02
|
18
|
+
|
19
|
+
- Backfilling missing specs
|
20
|
+
|
21
|
+
## [0.1.3] - 2023-05-01
|
4
22
|
|
5
23
|
- Initial release
|
data/Gemfile.lock
CHANGED
@@ -1,8 +1,10 @@
|
|
1
1
|
PATH
|
2
2
|
remote: .
|
3
3
|
specs:
|
4
|
-
langchainrb (0.
|
5
|
-
cohere-ruby (~> 0.9.
|
4
|
+
langchainrb (0.3.0)
|
5
|
+
cohere-ruby (~> 0.9.3)
|
6
|
+
eqn (~> 1.6.5)
|
7
|
+
google_search_results (~> 2.0.0)
|
6
8
|
milvus (~> 0.9.0)
|
7
9
|
pinecone (~> 0.1.6)
|
8
10
|
qdrant-ruby (~> 0.9.0)
|
@@ -33,8 +35,9 @@ GEM
|
|
33
35
|
builder (3.2.4)
|
34
36
|
byebug (11.1.3)
|
35
37
|
coderay (1.1.3)
|
36
|
-
cohere-ruby (0.9.
|
37
|
-
faraday (~>
|
38
|
+
cohere-ruby (0.9.3)
|
39
|
+
faraday (~> 1)
|
40
|
+
faraday_middleware (~> 1)
|
38
41
|
concurrent-ruby (1.2.2)
|
39
42
|
crass (1.0.6)
|
40
43
|
diff-lcs (1.5.0)
|
@@ -79,15 +82,38 @@ GEM
|
|
79
82
|
dry-initializer (~> 3.0)
|
80
83
|
dry-schema (>= 1.12, < 2)
|
81
84
|
zeitwerk (~> 2.6)
|
85
|
+
eqn (1.6.5)
|
86
|
+
treetop (>= 1.2.0)
|
82
87
|
erubi (1.12.0)
|
83
|
-
faraday (
|
84
|
-
faraday-
|
88
|
+
faraday (1.10.3)
|
89
|
+
faraday-em_http (~> 1.0)
|
90
|
+
faraday-em_synchrony (~> 1.0)
|
91
|
+
faraday-excon (~> 1.1)
|
92
|
+
faraday-httpclient (~> 1.0)
|
93
|
+
faraday-multipart (~> 1.0)
|
94
|
+
faraday-net_http (~> 1.0)
|
95
|
+
faraday-net_http_persistent (~> 1.0)
|
96
|
+
faraday-patron (~> 1.0)
|
97
|
+
faraday-rack (~> 1.0)
|
98
|
+
faraday-retry (~> 1.0)
|
85
99
|
ruby2_keywords (>= 0.0.4)
|
100
|
+
faraday-em_http (1.0.0)
|
101
|
+
faraday-em_synchrony (1.0.0)
|
102
|
+
faraday-excon (1.1.0)
|
103
|
+
faraday-httpclient (1.0.1)
|
86
104
|
faraday-multipart (1.0.4)
|
87
105
|
multipart-post (~> 2)
|
88
|
-
faraday-net_http (
|
89
|
-
|
90
|
-
|
106
|
+
faraday-net_http (1.0.1)
|
107
|
+
faraday-net_http_persistent (1.2.0)
|
108
|
+
faraday-patron (1.0.0)
|
109
|
+
faraday-rack (1.0.0)
|
110
|
+
faraday-retry (1.0.3)
|
111
|
+
faraday_middleware (1.2.0)
|
112
|
+
faraday (~> 1.0)
|
113
|
+
google_search_results (2.0.1)
|
114
|
+
graphlient (0.6.0)
|
115
|
+
faraday (>= 1.0)
|
116
|
+
faraday_middleware
|
91
117
|
graphql-client
|
92
118
|
graphql (2.0.21)
|
93
119
|
graphql-client (0.18.0)
|
@@ -99,12 +125,12 @@ GEM
|
|
99
125
|
i18n (1.13.0)
|
100
126
|
concurrent-ruby (~> 1.0)
|
101
127
|
ice_nine (0.11.2)
|
102
|
-
loofah (2.
|
128
|
+
loofah (2.21.1)
|
103
129
|
crass (~> 1.0.2)
|
104
130
|
nokogiri (>= 1.5.9)
|
105
131
|
method_source (1.0.0)
|
106
|
-
milvus (0.9.
|
107
|
-
faraday (~>
|
132
|
+
milvus (0.9.1)
|
133
|
+
faraday (~> 1)
|
108
134
|
mini_mime (1.1.2)
|
109
135
|
minitest (5.18.0)
|
110
136
|
multi_xml (0.6.0)
|
@@ -117,14 +143,16 @@ GEM
|
|
117
143
|
dry-struct (~> 1.6.0)
|
118
144
|
dry-validation (~> 1.10.0)
|
119
145
|
httparty (~> 0.21.0)
|
146
|
+
polyglot (0.3.5)
|
120
147
|
pry (0.14.2)
|
121
148
|
coderay (~> 1.1)
|
122
149
|
method_source (~> 1.0)
|
123
150
|
pry-byebug (3.10.1)
|
124
151
|
byebug (~> 11.0)
|
125
152
|
pry (>= 0.13, < 0.15)
|
126
|
-
qdrant-ruby (0.9.
|
127
|
-
faraday (~>
|
153
|
+
qdrant-ruby (0.9.2)
|
154
|
+
faraday (~> 1)
|
155
|
+
faraday_middleware (~> 1)
|
128
156
|
racc (1.6.2)
|
129
157
|
rack (2.2.7)
|
130
158
|
rack-test (2.1.0)
|
@@ -160,11 +188,14 @@ GEM
|
|
160
188
|
faraday-multipart (>= 1)
|
161
189
|
ruby2_keywords (0.0.5)
|
162
190
|
thor (1.2.1)
|
191
|
+
treetop (1.6.12)
|
192
|
+
polyglot (~> 0.3)
|
163
193
|
tzinfo (2.0.6)
|
164
194
|
concurrent-ruby (~> 1.0)
|
165
|
-
weaviate-ruby (0.8.
|
166
|
-
faraday (~>
|
167
|
-
|
195
|
+
weaviate-ruby (0.8.1)
|
196
|
+
faraday (~> 1)
|
197
|
+
faraday_middleware (~> 1)
|
198
|
+
graphlient (~> 0.6.0)
|
168
199
|
zeitwerk (2.6.8)
|
169
200
|
|
170
201
|
PLATFORMS
|
data/README.md
CHANGED
@@ -26,7 +26,7 @@ If bundler is not being used to manage dependencies, install the gem by executin
|
|
26
26
|
require "langchain"
|
27
27
|
```
|
28
28
|
|
29
|
-
|
29
|
+
#### Supported vector search databases and features:
|
30
30
|
|
31
31
|
| Database | Querying | Storage | Schema Management | Backups | Rails Integration | ??? |
|
32
32
|
| -------- |:------------------:| -------:| -----------------:| -------:| -----------------:| ---:|
|
@@ -35,7 +35,7 @@ List of currently supported vector search databases and features:
|
|
35
35
|
| Milvus | :white_check_mark: | WIP | WIP | WIP | | |
|
36
36
|
| Pinecone | :white_check_mark: | WIP | WIP | WIP | | |
|
37
37
|
|
38
|
-
### Using Vector Search Databases
|
38
|
+
### Using Vector Search Databases 🔍
|
39
39
|
|
40
40
|
Choose the LLM provider you'll be using (OpenAI or Cohere) and retrieve the API key.
|
41
41
|
|
@@ -90,7 +90,7 @@ client.ask(
|
|
90
90
|
)
|
91
91
|
```
|
92
92
|
|
93
|
-
### Using Standalone LLMs
|
93
|
+
### Using Standalone LLMs 🗣️
|
94
94
|
|
95
95
|
#### OpenAI
|
96
96
|
```ruby
|
@@ -114,12 +114,134 @@ cohere.embed(text: "foo bar")
|
|
114
114
|
cohere.complete(prompt: "What is the meaning of life?")
|
115
115
|
```
|
116
116
|
|
117
|
+
### Using Prompts 📋
|
118
|
+
|
119
|
+
#### Prompt Templates
|
120
|
+
|
121
|
+
Create a prompt with one input variable:
|
122
|
+
|
123
|
+
```ruby
|
124
|
+
prompt = Prompt::PromptTemplate.new(template: "Tell me a {adjective} joke.", input_variables: ["adjective"])
|
125
|
+
prompt.format(adjective: "funny") # "Tell me a funny joke."
|
126
|
+
```
|
127
|
+
|
128
|
+
Create a prompt with multiple input variables:
|
129
|
+
|
130
|
+
```ruby
|
131
|
+
prompt = Prompt::PromptTemplate.new(template: "Tell me a {adjective} joke about {content}.", input_variables: ["adjective", "content"])
|
132
|
+
prompt.format(adjective: "funny", content: "chickens") # "Tell me a funny joke about chickens."
|
133
|
+
```
|
134
|
+
|
135
|
+
Creating a PromptTemplate using just a prompt and no input_variables:
|
136
|
+
|
137
|
+
```ruby
|
138
|
+
prompt = Prompt::PromptTemplate.from_template("Tell me a {adjective} joke about {content}.")
|
139
|
+
prompt.input_variables # ["adjective", "content"]
|
140
|
+
prompt.format(adjective: "funny", content: "chickens") # "Tell me a funny joke about chickens."
|
141
|
+
```
|
142
|
+
|
143
|
+
Save prompt template to JSON file:
|
144
|
+
|
145
|
+
```ruby
|
146
|
+
prompt.save(file_path: "spec/fixtures/prompt/prompt_template.json")
|
147
|
+
```
|
148
|
+
|
149
|
+
Loading a new prompt template using a JSON file:
|
150
|
+
|
151
|
+
```ruby
|
152
|
+
prompt = Prompt.load_from_path(file_path: "spec/fixtures/prompt/prompt_template.json")
|
153
|
+
prompt.input_variables # ["adjective", "content"]
|
154
|
+
```
|
155
|
+
|
156
|
+
#### Few Shot Prompt Templates
|
157
|
+
|
158
|
+
Create a prompt with a few shot examples:
|
159
|
+
|
160
|
+
```ruby
|
161
|
+
prompt = Prompt::FewShotPromptTemplate.new(
|
162
|
+
prefix: "Write antonyms for the following words.",
|
163
|
+
suffix: "Input: {adjective}\nOutput:",
|
164
|
+
example_prompt: Prompt::PromptTemplate.new(
|
165
|
+
input_variables: ["input", "output"],
|
166
|
+
template: "Input: {input}\nOutput: {output}"
|
167
|
+
),
|
168
|
+
examples: [
|
169
|
+
{ "input": "happy", "output": "sad" },
|
170
|
+
{ "input": "tall", "output": "short" }
|
171
|
+
],
|
172
|
+
input_variables: ["adjective"]
|
173
|
+
)
|
174
|
+
|
175
|
+
prompt.format(adjective: "good")
|
176
|
+
|
177
|
+
# Write antonyms for the following words.
|
178
|
+
#
|
179
|
+
# Input: happy
|
180
|
+
# Output: sad
|
181
|
+
#
|
182
|
+
# Input: tall
|
183
|
+
# Output: short
|
184
|
+
#
|
185
|
+
# Input: good
|
186
|
+
# Output:
|
187
|
+
```
|
188
|
+
|
189
|
+
Save prompt template to JSON file:
|
190
|
+
|
191
|
+
```ruby
|
192
|
+
prompt.save(file_path: "spec/fixtures/prompt/few_shot_prompt_template.json")
|
193
|
+
```
|
194
|
+
|
195
|
+
Loading a new prompt template using a JSON file:
|
196
|
+
|
197
|
+
```ruby
|
198
|
+
prompt = Prompt.load_from_path(file_path: "spec/fixtures/prompt/few_shot_prompt_template.json")
|
199
|
+
prompt.prefix # "Write antonyms for the following words."
|
200
|
+
```
|
201
|
+
|
202
|
+
### Using Agents 🤖
|
203
|
+
Agents are semi-autonomous bots that can respond to user questions and use available to them Tools to provide informed replies. They break down problems into series of steps and define Actions (and Action Inputs) along the way that are executed and fed back to them as additional information. Once an Agent decides that it has the Final Answer it responds with it.
|
204
|
+
|
205
|
+
#### Chain-of-Thought Agent
|
206
|
+
|
207
|
+
```ruby
|
208
|
+
agent = Agent::ChainOfThoughtAgent.new(llm: :openai, llm_api_key: ENV["OPENAI_API_KEY"], tools: ['search', 'calculator'])
|
209
|
+
|
210
|
+
agent.tools
|
211
|
+
# => ["search", "calculator"]
|
212
|
+
```
|
213
|
+
```ruby
|
214
|
+
agent.run(question: "How many full soccer fields would be needed to cover the distance between NYC and DC in a straight line?", logging: true)
|
215
|
+
#=> "Approximately 2,945 soccer fields would be needed to cover the distance between NYC and DC in a straight line."
|
216
|
+
```
|
217
|
+
|
218
|
+
#### Demo
|
219
|
+
![May-12-2023 13-09-13](https://github.com/andreibondarev/langchainrb/assets/541665/6bad4cd9-976c-420f-9cf9-b85bf84f7eaf)
|
220
|
+
|
221
|
+
![May-12-2023 13-07-45](https://github.com/andreibondarev/langchainrb/assets/541665/9aacdcc7-4225-4ea0-ab96-7ee48826eb9b)
|
222
|
+
|
223
|
+
#### Available Tools 🛠️
|
224
|
+
|
225
|
+
| Name | Description | Requirements |
|
226
|
+
| -------- | :------------------: | :------------------: |
|
227
|
+
| "search" | A wrapper around Google Search | `ENV["SERP_API_KEY"]` (https://serpapi.com/manage-api-key)
|
228
|
+
| "calculator" | Useful for getting the result of a math expression | |
|
229
|
+
|
117
230
|
## Development
|
118
231
|
|
119
232
|
After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake spec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
|
120
233
|
|
121
234
|
To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and the created tag, and push the `.gem` file to [rubygems.org](https://rubygems.org).
|
122
235
|
|
236
|
+
## Core Contributors
|
237
|
+
[<img style="border-radius:50%" alt="Andrei Bondarev" src="https://avatars.githubusercontent.com/u/541665?v=4" width="80" height="80" class="avatar">](https://github.com/andreibondarev)
|
238
|
+
|
239
|
+
## Honorary Contributors
|
240
|
+
[<img style="border-radius:50%" alt="Andrei Bondarev" src="https://avatars.githubusercontent.com/u/541665?v=4" width="80" height="80" class="avatar">](https://github.com/andreibondarev)
|
241
|
+
[<img style="border-radius:50%" alt="Rafael Figueiredo" src="https://avatars.githubusercontent.com/u/35845775?v=4" width="80" height="80" class="avatar">](https://github.com/rafaelqfigueiredo)
|
242
|
+
|
243
|
+
(Criteria of becoming an Honorary Contributor or Core Contributor is pending...)
|
244
|
+
|
123
245
|
## Contributing
|
124
246
|
|
125
247
|
Bug reports and pull requests are welcome on GitHub at https://github.com/andreibondarev/langchain.
|
@@ -0,0 +1,36 @@
|
|
1
|
+
require "langchain"
|
2
|
+
|
3
|
+
# Create a prompt with a few shot examples
|
4
|
+
prompt = Prompt::FewShotPromptTemplate.new(
|
5
|
+
prefix: "Write antonyms for the following words.",
|
6
|
+
suffix: "Input: {adjective}\nOutput:",
|
7
|
+
example_prompt: Prompt::PromptTemplate.new(
|
8
|
+
input_variables: ["input", "output"],
|
9
|
+
template: "Input: {input}\nOutput: {output}"
|
10
|
+
),
|
11
|
+
examples: [
|
12
|
+
{ "input": "happy", "output": "sad" },
|
13
|
+
{ "input": "tall", "output": "short" }
|
14
|
+
],
|
15
|
+
input_variables: ["adjective"]
|
16
|
+
)
|
17
|
+
|
18
|
+
prompt.format(adjective: "good")
|
19
|
+
|
20
|
+
# Write antonyms for the following words.
|
21
|
+
#
|
22
|
+
# Input: happy
|
23
|
+
# Output: sad
|
24
|
+
#
|
25
|
+
# Input: tall
|
26
|
+
# Output: short
|
27
|
+
#
|
28
|
+
# Input: good
|
29
|
+
# Output:
|
30
|
+
|
31
|
+
# Save prompt template to JSON file
|
32
|
+
prompt.save(file_path: "spec/fixtures/prompt/few_shot_prompt_template.json")
|
33
|
+
|
34
|
+
# Loading a new prompt template using a JSON file
|
35
|
+
prompt = Prompt.load_from_path(file_path: "spec/fixtures/prompt/few_shot_prompt_template.json")
|
36
|
+
prompt.prefix # "Write antonyms for the following words."
|
@@ -0,0 +1,21 @@
|
|
1
|
+
require "langchain"
|
2
|
+
|
3
|
+
# Create a prompt with one input variable
|
4
|
+
prompt = Prompt::PromptTemplate.new(template: "Tell me a {adjective} joke.", input_variables: ["adjective"])
|
5
|
+
prompt.format(adjective: "funny") # "Tell me a funny joke."
|
6
|
+
|
7
|
+
# Create a prompt with multiple input variables
|
8
|
+
prompt = Prompt::PromptTemplate.new(template: "Tell me a {adjective} joke about {content}.", input_variables: ["adjective", "content"])
|
9
|
+
prompt.format(adjective: "funny", content: "chickens") # "Tell me a funny joke about chickens."
|
10
|
+
|
11
|
+
# Creating a PromptTemplate using just a prompt and no input_variables
|
12
|
+
prompt = Prompt::PromptTemplate.from_template("Tell me a {adjective} joke about {content}.")
|
13
|
+
prompt.input_variables # ["adjective", "content"]
|
14
|
+
prompt.format(adjective: "funny", content: "chickens") # "Tell me a funny joke about chickens."
|
15
|
+
|
16
|
+
# Save prompt template to JSON file
|
17
|
+
prompt.save(file_path: "spec/fixtures/prompt/prompt_template.json")
|
18
|
+
|
19
|
+
# Loading a new prompt template using a JSON file
|
20
|
+
prompt = Prompt.load_from_path(file_path: "spec/fixtures/prompt/prompt_template.json")
|
21
|
+
prompt.input_variables # ["adjective", "content"]
|
data/lib/agent/base.rb
ADDED
@@ -0,0 +1,108 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
module Agent
|
4
|
+
class ChainOfThoughtAgent < Base
|
5
|
+
attr_reader :llm, :llm_api_key, :llm_client, :tools
|
6
|
+
|
7
|
+
# Initializes the Agent
|
8
|
+
#
|
9
|
+
# @param llm [Symbol] The LLM to use
|
10
|
+
# @param llm_api_key [String] The API key for the LLM
|
11
|
+
# @param tools [Array] The tools to use
|
12
|
+
# @return [ChainOfThoughtAgent] The Agent::ChainOfThoughtAgent instance
|
13
|
+
def initialize(llm:, llm_api_key:, tools: [])
|
14
|
+
LLM::Base.validate_llm!(llm: llm)
|
15
|
+
Tool::Base.validate_tools!(tools: tools)
|
16
|
+
|
17
|
+
@llm = llm
|
18
|
+
@llm_api_key = llm_api_key
|
19
|
+
@tools = tools
|
20
|
+
|
21
|
+
@llm_client = LLM.const_get(LLM::Base::LLMS.fetch(llm)).new(api_key: llm_api_key)
|
22
|
+
end
|
23
|
+
|
24
|
+
# Validate tools when they're re-assigned
|
25
|
+
#
|
26
|
+
# @param value [Array] The tools to use
|
27
|
+
# @return [Array] The tools that will be used
|
28
|
+
def tools=(value)
|
29
|
+
Tool::Base.validate_tools!(tools: value)
|
30
|
+
@tools = value
|
31
|
+
end
|
32
|
+
|
33
|
+
# Run the Agent!
|
34
|
+
#
|
35
|
+
# @param question [String] The question to ask
|
36
|
+
# @param logging [Boolean] Whether or not to log the Agent's actions
|
37
|
+
# @return [String] The answer to the question
|
38
|
+
def run(question:, logging: false)
|
39
|
+
question = question.strip
|
40
|
+
prompt = create_prompt(
|
41
|
+
question: question,
|
42
|
+
tools: tools
|
43
|
+
)
|
44
|
+
|
45
|
+
loop do
|
46
|
+
puts("Agent: Passing the prompt to the #{llm} LLM") if logging
|
47
|
+
response = llm_client.generate_completion(
|
48
|
+
prompt: prompt,
|
49
|
+
stop_sequences: ["Observation:"],
|
50
|
+
max_tokens: 500
|
51
|
+
)
|
52
|
+
|
53
|
+
# Append the response to the prompt
|
54
|
+
prompt += response;
|
55
|
+
|
56
|
+
# Find the requested action in the "Action: search" format
|
57
|
+
action = response.match(/Action: (.*)/)&.send(:[], -1)
|
58
|
+
|
59
|
+
if action
|
60
|
+
# Find the input to the action in the "Action Input: [action_input]" format
|
61
|
+
action_input = response.match(/Action Input: "?(.*)"?/)&.send(:[], -1)
|
62
|
+
|
63
|
+
puts("Agent: Using the \"#{action}\" Tool with \"#{action_input}\"") if logging
|
64
|
+
|
65
|
+
# Retrieve the Tool::[ToolName] class and call `execute`` with action_input as the input
|
66
|
+
result = Tool
|
67
|
+
.const_get(Tool::Base::TOOLS[action.strip])
|
68
|
+
.execute(input: action_input)
|
69
|
+
|
70
|
+
# Append the Observation to the prompt
|
71
|
+
if prompt.end_with?("Observation:")
|
72
|
+
prompt += " #{result}\nThought:"
|
73
|
+
else
|
74
|
+
prompt += "\nObservation: #{result}\nThought:"
|
75
|
+
end
|
76
|
+
else
|
77
|
+
# Return the final answer
|
78
|
+
break response.match(/Final Answer: (.*)/)&.send(:[], -1)
|
79
|
+
end
|
80
|
+
end
|
81
|
+
end
|
82
|
+
|
83
|
+
private
|
84
|
+
|
85
|
+
# Create the initial prompt to pass to the LLM
|
86
|
+
# @param question [String] Question to ask
|
87
|
+
# @param tools [Array] Tools to use
|
88
|
+
# @return [String] Prompt
|
89
|
+
def create_prompt(question:, tools:)
|
90
|
+
prompt_template.format(
|
91
|
+
date: Date.today.strftime("%B %d, %Y"),
|
92
|
+
question: question,
|
93
|
+
tool_names: "[#{tools.join(", ")}]",
|
94
|
+
tools: tools.map do |tool|
|
95
|
+
"#{tool}: #{Tool.const_get(Tool::Base::TOOLS[tool]).const_get("DESCRIPTION")}"
|
96
|
+
end.join("\n")
|
97
|
+
)
|
98
|
+
end
|
99
|
+
|
100
|
+
# Load the PromptTemplate from the JSON file
|
101
|
+
# @return [PromptTemplate] PromptTemplate instance
|
102
|
+
def prompt_template
|
103
|
+
@template ||= Prompt.load_from_path(
|
104
|
+
file_path: "lib/agent/chain_of_thought_agent/chain_of_thought_agent_prompt.json"
|
105
|
+
)
|
106
|
+
end
|
107
|
+
end
|
108
|
+
end
|
@@ -0,0 +1,10 @@
|
|
1
|
+
{
|
2
|
+
"_type": "prompt",
|
3
|
+
"template": "Today is {date} and you can use tools to get new information. Answer the following questions as best you can using the following tools:\n\n{tools}\n\nUse the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of {tool_names}\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n\nBegin!\n\nQuestion: {question}\nThought:",
|
4
|
+
"input_variables": [
|
5
|
+
"date",
|
6
|
+
"question",
|
7
|
+
"tools",
|
8
|
+
"tool_names"
|
9
|
+
]
|
10
|
+
}
|
data/lib/langchain.rb
CHANGED
@@ -2,6 +2,11 @@
|
|
2
2
|
|
3
3
|
require_relative "./version"
|
4
4
|
|
5
|
+
module Agent
|
6
|
+
autoload :Base, "agent/base"
|
7
|
+
autoload :ChainOfThoughtAgent, "agent/chain_of_thought_agent/chain_of_thought_agent.rb"
|
8
|
+
end
|
9
|
+
|
5
10
|
module Vectorsearch
|
6
11
|
autoload :Base, "vectorsearch/base"
|
7
12
|
autoload :Milvus, "vectorsearch/milvus"
|
@@ -14,4 +19,18 @@ module LLM
|
|
14
19
|
autoload :Base, "llm/base"
|
15
20
|
autoload :Cohere, "llm/cohere"
|
16
21
|
autoload :OpenAI, "llm/openai"
|
17
|
-
end
|
22
|
+
end
|
23
|
+
|
24
|
+
module Prompt
|
25
|
+
require_relative "prompt/loading"
|
26
|
+
|
27
|
+
autoload :Base, "prompt/base"
|
28
|
+
autoload :PromptTemplate, "prompt/prompt_template"
|
29
|
+
autoload :FewShotPromptTemplate, "prompt/few_shot_prompt_template"
|
30
|
+
end
|
31
|
+
|
32
|
+
module Tool
|
33
|
+
autoload :Base, "tool/base"
|
34
|
+
autoload :Calculator, "tool/calculator"
|
35
|
+
autoload :SerpApi, "tool/serp_api"
|
36
|
+
end
|
data/lib/llm/base.rb
CHANGED
@@ -14,5 +14,14 @@ module LLM
|
|
14
14
|
def default_dimension
|
15
15
|
self.class.const_get("DEFAULTS").dig(:dimension)
|
16
16
|
end
|
17
|
+
|
18
|
+
# Ensure that the LLM value passed in is supported
|
19
|
+
# @param llm [Symbol] The LLM to use
|
20
|
+
def self.validate_llm!(llm:)
|
21
|
+
# TODO: Fix so this works when `llm` value is a string instead of a symbol
|
22
|
+
unless LLM::Base::LLMS.keys.include?(llm)
|
23
|
+
raise ArgumentError, "LLM must be one of #{LLM::Base::LLMS.keys}"
|
24
|
+
end
|
25
|
+
end
|
17
26
|
end
|
18
27
|
end
|
data/lib/llm/cohere.rb
CHANGED
@@ -30,12 +30,20 @@ module LLM
|
|
30
30
|
# Generate a completion for a given prompt
|
31
31
|
# @param prompt [String] The prompt to generate a completion for
|
32
32
|
# @return [Hash] The completion
|
33
|
-
def complete(prompt
|
34
|
-
|
33
|
+
def complete(prompt:, **params)
|
34
|
+
default_params = {
|
35
35
|
prompt: prompt,
|
36
36
|
temperature: DEFAULTS[:temperature],
|
37
|
-
model: DEFAULTS[:completion_model_name]
|
38
|
-
|
37
|
+
model: DEFAULTS[:completion_model_name]
|
38
|
+
}
|
39
|
+
|
40
|
+
if params[:stop_sequences]
|
41
|
+
default_params[:stop_sequences] = params.delete(:stop_sequences)
|
42
|
+
end
|
43
|
+
|
44
|
+
default_params.merge!(params)
|
45
|
+
|
46
|
+
response = client.generate(**default_params)
|
39
47
|
response.dig("generations").first.dig("text")
|
40
48
|
end
|
41
49
|
|
data/lib/llm/openai.rb
CHANGED
@@ -33,15 +33,21 @@ module LLM
|
|
33
33
|
# Generate a completion for a given prompt
|
34
34
|
# @param prompt [String] The prompt to generate a completion for
|
35
35
|
# @return [String] The completion
|
36
|
-
def complete(prompt
|
37
|
-
|
38
|
-
|
39
|
-
|
40
|
-
|
41
|
-
|
42
|
-
|
43
|
-
|
44
|
-
|
36
|
+
def complete(prompt:, **params)
|
37
|
+
default_params = {
|
38
|
+
model: DEFAULTS[:completion_model_name],
|
39
|
+
temperature: DEFAULTS[:temperature],
|
40
|
+
prompt: prompt
|
41
|
+
}
|
42
|
+
|
43
|
+
if params[:stop_sequences]
|
44
|
+
default_params[:stop] = params.delete(:stop_sequences)
|
45
|
+
end
|
46
|
+
|
47
|
+
default_params.merge!(params)
|
48
|
+
|
49
|
+
response = client.completions(parameters: default_params)
|
50
|
+
response.dig("choices", 0, "text")
|
45
51
|
end
|
46
52
|
|
47
53
|
alias_method :generate_completion, :complete
|
data/lib/prompt/base.rb
ADDED
@@ -0,0 +1,86 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
require 'strscan'
|
4
|
+
|
5
|
+
module Prompt
|
6
|
+
class Base
|
7
|
+
def format(**kwargs)
|
8
|
+
raise NotImplementedError
|
9
|
+
end
|
10
|
+
|
11
|
+
def prompt_type
|
12
|
+
raise NotImplementedError
|
13
|
+
end
|
14
|
+
|
15
|
+
def to_h
|
16
|
+
raise NotImplementedError
|
17
|
+
end
|
18
|
+
|
19
|
+
#
|
20
|
+
# Validate the input variables against the template.
|
21
|
+
#
|
22
|
+
# @param template [String] The template to validate against.
|
23
|
+
# @param input_variables [Array<String>] The input variables to validate.
|
24
|
+
#
|
25
|
+
# @raise [ArgumentError] If there are missing or extra variables.
|
26
|
+
#
|
27
|
+
# @return [void]
|
28
|
+
#
|
29
|
+
def validate(template:, input_variables:)
|
30
|
+
input_variables_set = @input_variables.uniq
|
31
|
+
variables_from_template = Prompt::Base.extract_variables_from_template(template)
|
32
|
+
|
33
|
+
missing_variables = variables_from_template - input_variables_set
|
34
|
+
extra_variables = input_variables_set - variables_from_template
|
35
|
+
|
36
|
+
raise ArgumentError, "Missing variables: #{missing_variables}" if missing_variables.any?
|
37
|
+
raise ArgumentError, "Extra variables: #{extra_variables}" if extra_variables.any?
|
38
|
+
end
|
39
|
+
|
40
|
+
#
|
41
|
+
# Save the object to a file in JSON format.
|
42
|
+
#
|
43
|
+
# @param file_path [String, Pathname] The path to the file to save the object to
|
44
|
+
#
|
45
|
+
# @raise [ArgumentError] If file_path doesn't end with .json
|
46
|
+
#
|
47
|
+
# @return [void]
|
48
|
+
#
|
49
|
+
def save(file_path:)
|
50
|
+
save_path = file_path.is_a?(String) ? Pathname.new(file_path) : file_path
|
51
|
+
directory_path = save_path.dirname
|
52
|
+
FileUtils.mkdir_p(directory_path) unless directory_path.directory?
|
53
|
+
|
54
|
+
if save_path.extname == ".json"
|
55
|
+
File.open(file_path, "w") { |f| f.write(to_h.to_json) }
|
56
|
+
else
|
57
|
+
raise ArgumentError, "#{file_path} must be json"
|
58
|
+
end
|
59
|
+
end
|
60
|
+
|
61
|
+
private
|
62
|
+
|
63
|
+
#
|
64
|
+
# Extracts variables from a template string.
|
65
|
+
#
|
66
|
+
# This method takes a template string and returns an array of input variable names
|
67
|
+
# contained within the template. Input variables are defined as text enclosed in
|
68
|
+
# curly braces (e.g. "{variable_name}").
|
69
|
+
#
|
70
|
+
# @param template [String] The template string to extract variables from.
|
71
|
+
#
|
72
|
+
# @return [Array<String>] An array of input variable names.
|
73
|
+
#
|
74
|
+
def self.extract_variables_from_template(template)
|
75
|
+
input_variables = []
|
76
|
+
scanner = StringScanner.new(template)
|
77
|
+
|
78
|
+
while scanner.scan_until(/\{([^{}]*)\}/)
|
79
|
+
variable = scanner[1].strip
|
80
|
+
input_variables << variable unless variable.empty?
|
81
|
+
end
|
82
|
+
|
83
|
+
input_variables
|
84
|
+
end
|
85
|
+
end
|
86
|
+
end
|
@@ -0,0 +1,73 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
module Prompt
|
4
|
+
class FewShotPromptTemplate < Base
|
5
|
+
attr_reader :examples, :example_prompt, :input_variables, :prefix, :suffix, :example_separator
|
6
|
+
|
7
|
+
#
|
8
|
+
# Initializes a new instance of the class.
|
9
|
+
#
|
10
|
+
# @param examples [Array<Hash>] Examples to format into the prompt.
|
11
|
+
# @param example_prompt [PromptTemplate] PromptTemplate used to format an individual example.
|
12
|
+
# @param suffix [String] A prompt template string to put after the examples.
|
13
|
+
# @param input_variables [Array<String>] A list of the names of the variables the prompt template expects.
|
14
|
+
# @param example_separator [String] String separator used to join the prefix, the examples, and suffix.
|
15
|
+
# @param prefix [String] A prompt template string to put before the examples.
|
16
|
+
# @param validate_template [Boolean] Whether or not to try validating the template.
|
17
|
+
#
|
18
|
+
def initialize(
|
19
|
+
examples:,
|
20
|
+
example_prompt:,
|
21
|
+
input_variables:,
|
22
|
+
suffix:,
|
23
|
+
prefix: "",
|
24
|
+
example_separator: "\n\n",
|
25
|
+
validate_template: true
|
26
|
+
)
|
27
|
+
@examples = examples
|
28
|
+
@example_prompt = example_prompt
|
29
|
+
@input_variables = input_variables
|
30
|
+
@prefix = prefix
|
31
|
+
@suffix = suffix
|
32
|
+
@example_separator = example_separator
|
33
|
+
|
34
|
+
validate(template: @prefix + @suffix, input_variables: @input_variables) if @validate_template
|
35
|
+
end
|
36
|
+
|
37
|
+
#
|
38
|
+
# Format the prompt with the inputs.
|
39
|
+
#
|
40
|
+
# @param kwargs [Hash] Any arguments to be passed to the prompt template.
|
41
|
+
#
|
42
|
+
# @return [String] A formatted string.
|
43
|
+
#
|
44
|
+
def format(**kwargs)
|
45
|
+
example_string = @examples.map { |example| @example_prompt.format(**example) }
|
46
|
+
|
47
|
+
suffix_string = @suffix
|
48
|
+
kwargs.each { |key, value| suffix_string = suffix_string.gsub(/\{#{key}\}/, value.to_s) }
|
49
|
+
|
50
|
+
[@prefix, *example_string, suffix_string].join(@example_separator)
|
51
|
+
end
|
52
|
+
|
53
|
+
#
|
54
|
+
# Returns the key type of prompt as a string.
|
55
|
+
#
|
56
|
+
# @return [String] the prompt type key
|
57
|
+
#
|
58
|
+
def prompt_type
|
59
|
+
"few_shot"
|
60
|
+
end
|
61
|
+
|
62
|
+
def to_h
|
63
|
+
{
|
64
|
+
_type: prompt_type,
|
65
|
+
input_variables: @input_variables,
|
66
|
+
prefix: @prefix,
|
67
|
+
example_prompt: @example_prompt.to_h,
|
68
|
+
examples: @examples,
|
69
|
+
suffix: @suffix
|
70
|
+
}
|
71
|
+
end
|
72
|
+
end
|
73
|
+
end
|
@@ -0,0 +1,87 @@
|
|
1
|
+
# frozen_string_literal: true

require 'strscan'
require 'pathname'
require 'json'

module Prompt
  # Maps a prompt configuration `_type` string to the loader that builds it.
  TYPE_TO_LOADER = {
    "prompt" => ->(config) { Prompt.load_prompt(config) },
    "few_shot" => ->(config) { Prompt.load_few_shot_prompt(config) }
  }

  class << self
    #
    # Load prompt from file.
    #
    # @param file_path [String, Pathname] The path of the file to read the configuration data from.
    #
    # @return [Object] The loaded prompt.
    #
    # @raise [ArgumentError] If the file type of the specified file path is not supported.
    #
    def load_from_path(file_path:)
      file_path = file_path.is_a?(String) ? Pathname.new(file_path) : file_path

      # Only JSON prompt files are supported for now; fail before touching the file.
      unless file_path.extname == ".json"
        raise ArgumentError, "Got unsupported file type #{file_path.extname}"
      end

      config = JSON.parse(File.read(file_path))
      load_from_config(config)
    end

    #
    # Loads a prompt template with the given configuration.
    #
    # @param config [Hash] A hash containing the configuration for the prompt.
    #
    # @return [PromptTemplate] The loaded prompt.
    #
    def load_prompt(config)
      template, input_variables = config.values_at("template", "input_variables")
      PromptTemplate.new(template: template, input_variables: input_variables)
    end

    #
    # Loads a few-shot prompt template with the given configuration.
    #
    # @param config [Hash] A hash containing the configuration for the prompt.
    #
    # @return [FewShotPromptTemplate] The loaded prompt.
    #
    def load_few_shot_prompt(config)
      prefix, suffix, example_prompt, examples, input_variables =
        config.values_at("prefix", "suffix", "example_prompt", "examples", "input_variables")
      # The nested example prompt is itself a plain prompt configuration.
      example_prompt = load_prompt(example_prompt)
      FewShotPromptTemplate.new(
        prefix: prefix,
        suffix: suffix,
        example_prompt: example_prompt,
        examples: examples,
        input_variables: input_variables
      )
    end

    private

    #
    # Loads the prompt from the given configuration hash
    #
    # @param config [Hash] the configuration hash to load from
    #
    # @return [Object] the loaded prompt
    #
    # @raise [ArgumentError] if the prompt type specified in the config is not supported
    #
    def load_from_config(config)
      # If `_type` key is not present in the configuration hash, add it with a default value of `prompt`
      unless config.key?("_type")
        puts "[WARN] No `_type` key found, defaulting to `prompt`"
        config["_type"] = "prompt"
      end

      # If the prompt type specified in the configuration hash is not supported, raise an exception
      unless TYPE_TO_LOADER.key?(config["_type"])
        raise ArgumentError, "Loading #{config["_type"]} prompt not supported"
      end

      # Load the prompt using the corresponding loader function from the `TYPE_TO_LOADER` hash
      TYPE_TO_LOADER[config["_type"]].call(config)
    end
  end
end
|
@@ -0,0 +1,62 @@
|
|
1
|
+
# frozen_string_literal: true

module Prompt
  class PromptTemplate < Base
    attr_reader :template, :input_variables, :validate_template

    #
    # Builds a prompt backed by a plain template string with `{variable}` placeholders.
    #
    # @param template [String] The prompt template.
    # @param input_variables [Array<String>] A list of the names of the variables the prompt template expects.
    # @param validate_template [Boolean] Whether or not to try validating the template.
    #
    def initialize(template:, input_variables:, validate_template: true)
      @template = template
      @input_variables = input_variables
      @validate_template = validate_template

      return unless @validate_template

      validate(template: @template, input_variables: @input_variables)
    end

    #
    # Format the prompt by substituting the given keyword arguments into the template.
    #
    # @param kwargs [Hash] Any arguments to be passed to the prompt template.
    # @return [String] A formatted string.
    #
    def format(**kwargs)
      kwargs.reduce(@template) do |rendered, (name, value)|
        rendered.gsub(/\{#{name}\}/, value.to_s)
      end
    end

    #
    # Returns the key type of prompt as a string.
    #
    # @return [String] the prompt type key
    #
    def prompt_type
      "prompt"
    end

    # Serializes the prompt into a plain Hash suitable for persisting as JSON.
    def to_h
      {
        _type: prompt_type,
        input_variables: @input_variables,
        template: @template
      }
    end

    #
    # Creates a new instance of the class using the given template, inferring
    # its input variables from the template's placeholders.
    #
    # @param template [String] The template to use
    #
    # @return [Object] A new instance of the class
    #
    def self.from_template(template)
      new(template: template, input_variables: extract_variables_from_template(template))
    end
  end
end
|
data/lib/tool/base.rb
ADDED
@@ -0,0 +1,38 @@
|
|
1
|
+
# frozen_string_literal: true

module Tool
  class Base
    # How to add additional Tools?
    # 1. Create a new file in lib/tool/your_tool_name.rb
    # 2. Add your tool to the TOOLS hash below
    #    "your_tool_name" => "Tool::YourToolName"
    # 3. Implement `self.execute(input:)` method in your tool class
    # 4. Add your tool to the README.md

    # Registry mapping a tool's short name to the class that implements it.
    TOOLS = {
      "calculator" => "Tool::Calculator",
      "search" => "Tool::SerpApi"
    }

    # Executes the tool and returns the answer
    # @param input [String] input to the tool
    # @return [String] answer
    # @raise [NotImplementedError] subclasses must override this method
    def self.execute(input:)
      raise NotImplementedError, "Your tool must implement the `self.execute(input:)` method that returns a string"
    end

    #
    # Validates the list of strings (tools) are all supported or raises an error
    # @param tools [Array<String>] list of tools to be used
    #
    # @raise [ArgumentError] If any of the tools are not supported
    #
    def self.validate_tools!(tools:)
      unknown = tools.reject { |tool| TOOLS.key?(tool) }

      raise ArgumentError, "Unrecognized Tools: #{unknown}" if unknown.any?
    end
  end
end
|
@@ -0,0 +1,23 @@
|
|
1
|
+
# frozen_string_literal: true

require "eqn"

module Tool
  class Calculator < Base
    DESCRIPTION = "Useful for getting the result of a math expression. " \
      "The input to this tool should be a valid mathematical expression that could be executed by a simple calculator."

    # Evaluates a pure math expression or if equation contains non-math characters (e.g.: "12F in Celsius") then
    # it uses the google search calculator to evaluate the expression
    # @param input [String] math expression
    # @return [String] Answer
    def self.execute(input:)
      Eqn::Calculator.calc(input)
    rescue Eqn::ParseError, Eqn::NoVariableValueError
      # Not a pure math expression (e.g.: "12F in Celsius") — fall back to the
      # Google answer box via SerpApi.
      Tool::SerpApi.execute_search(input: input).dig(:answer_box, :to)
    end
  end
end
|
@@ -0,0 +1,36 @@
|
|
1
|
+
# frozen_string_literal: true

require "google_search_results"

module Tool
  class SerpApi < Base
    DESCRIPTION = "A wrapper around Google Search. " \
      "Useful for when you need to answer questions about current events. " \
      "Always one of the first options when you need to find information on internet. " \
      "Input should be a search query."

    # Executes Google Search and returns hash_results JSON
    # @param input [String] search query
    # @return [String] Answer
    # TODO: Glance at all of the fields that langchain Python looks through: https://github.com/hwchase17/langchain/blob/v0.0.166/langchain/utilities/serpapi.py#L128-L156
    # We may need to do the same thing here.
    def self.execute(input:)
      results = execute_search(input: input)

      # Prefer a direct answer, then the answer-box snippet, then the first
      # organic result's snippet.
      results.dig(:answer_box, :answer) ||
        results.dig(:answer_box, :snippet) ||
        results.dig(:organic_results, 0, :snippet)
    end

    # Executes Google Search and returns hash_results JSON
    # @param input [String] search query
    # @return [Hash] hash_results JSON
    def self.execute_search(input:)
      GoogleSearch
        .new(q: input, serp_api_key: ENV["SERP_API_KEY"])
        .get_hash
    end
  end
end
|
data/lib/vectorsearch/base.rb
CHANGED
@@ -11,7 +11,7 @@ module Vectorsearch
|
|
11
11
|
# @param llm [Symbol] The LLM to use
|
12
12
|
# @param llm_api_key [String] The API key for the LLM
|
13
13
|
def initialize(llm:, llm_api_key:)
|
14
|
-
validate_llm!(llm: llm)
|
14
|
+
LLM::Base.validate_llm!(llm: llm)
|
15
15
|
|
16
16
|
@llm = llm
|
17
17
|
@llm_api_key = llm_api_key
|
@@ -38,21 +38,21 @@ module Vectorsearch
|
|
38
38
|
:default_dimension
|
39
39
|
|
40
40
|
def generate_prompt(question:, context:)
  # Render the retrieved context as a few-shot example block, followed by
  # the user's question, producing the final LLM prompt string.
  Prompt::FewShotPromptTemplate.new(
    prefix: "Context:",
    suffix: "---\nQuestion: {question}\n---\nAnswer:",
    example_prompt: Prompt::PromptTemplate.new(
      template: "{context}",
      input_variables: ["context"]
    ),
    examples: [{ context: context }],
    input_variables: ["question"],
    example_separator: "\n"
  ).format(question: question)
end
|
57
57
|
end
|
58
58
|
end
|
data/lib/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: langchainrb
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.
|
4
|
+
version: 0.3.0
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Andrei Bondarev
|
8
8
|
autorequire:
|
9
9
|
bindir: exe
|
10
10
|
cert_chain: []
|
11
|
-
date: 2023-05-
|
11
|
+
date: 2023-05-12 00:00:00.000000000 Z
|
12
12
|
dependencies:
|
13
13
|
- !ruby/object:Gem::Dependency
|
14
14
|
name: pry-byebug
|
@@ -44,14 +44,28 @@ dependencies:
|
|
44
44
|
requirements:
|
45
45
|
- - "~>"
|
46
46
|
- !ruby/object:Gem::Version
|
47
|
-
version: 0.9.
|
47
|
+
version: 0.9.3
|
48
48
|
type: :runtime
|
49
49
|
prerelease: false
|
50
50
|
version_requirements: !ruby/object:Gem::Requirement
|
51
51
|
requirements:
|
52
52
|
- - "~>"
|
53
53
|
- !ruby/object:Gem::Version
|
54
|
-
version: 0.9.
|
54
|
+
version: 0.9.3
|
55
|
+
- !ruby/object:Gem::Dependency
|
56
|
+
name: eqn
|
57
|
+
requirement: !ruby/object:Gem::Requirement
|
58
|
+
requirements:
|
59
|
+
- - "~>"
|
60
|
+
- !ruby/object:Gem::Version
|
61
|
+
version: 1.6.5
|
62
|
+
type: :runtime
|
63
|
+
prerelease: false
|
64
|
+
version_requirements: !ruby/object:Gem::Requirement
|
65
|
+
requirements:
|
66
|
+
- - "~>"
|
67
|
+
- !ruby/object:Gem::Version
|
68
|
+
version: 1.6.5
|
55
69
|
- !ruby/object:Gem::Dependency
|
56
70
|
name: milvus
|
57
71
|
requirement: !ruby/object:Gem::Requirement
|
@@ -108,6 +122,20 @@ dependencies:
|
|
108
122
|
- - "~>"
|
109
123
|
- !ruby/object:Gem::Version
|
110
124
|
version: 0.9.0
|
125
|
+
- !ruby/object:Gem::Dependency
|
126
|
+
name: google_search_results
|
127
|
+
requirement: !ruby/object:Gem::Requirement
|
128
|
+
requirements:
|
129
|
+
- - "~>"
|
130
|
+
- !ruby/object:Gem::Version
|
131
|
+
version: 2.0.0
|
132
|
+
type: :runtime
|
133
|
+
prerelease: false
|
134
|
+
version_requirements: !ruby/object:Gem::Requirement
|
135
|
+
requirements:
|
136
|
+
- - "~>"
|
137
|
+
- !ruby/object:Gem::Version
|
138
|
+
version: 2.0.0
|
111
139
|
- !ruby/object:Gem::Dependency
|
112
140
|
name: weaviate-ruby
|
113
141
|
requirement: !ruby/object:Gem::Requirement
|
@@ -137,13 +165,25 @@ files:
|
|
137
165
|
- README.md
|
138
166
|
- Rakefile
|
139
167
|
- examples/.keep
|
168
|
+
- examples/create_and_manage_few_shot_prompt_templates.rb
|
169
|
+
- examples/create_and_manage_prompt_templates.rb
|
140
170
|
- examples/store_and_query_with_pinecone.rb
|
141
171
|
- examples/store_and_query_with_qdrant.rb
|
142
172
|
- examples/store_and_query_with_weaviate.rb
|
173
|
+
- lib/agent/base.rb
|
174
|
+
- lib/agent/chain_of_thought_agent/chain_of_thought_agent.rb
|
175
|
+
- lib/agent/chain_of_thought_agent/chain_of_thought_agent_prompt.json
|
143
176
|
- lib/langchain.rb
|
144
177
|
- lib/llm/base.rb
|
145
178
|
- lib/llm/cohere.rb
|
146
179
|
- lib/llm/openai.rb
|
180
|
+
- lib/prompt/base.rb
|
181
|
+
- lib/prompt/few_shot_prompt_template.rb
|
182
|
+
- lib/prompt/loading.rb
|
183
|
+
- lib/prompt/prompt_template.rb
|
184
|
+
- lib/tool/base.rb
|
185
|
+
- lib/tool/calculator.rb
|
186
|
+
- lib/tool/serp_api.rb
|
147
187
|
- lib/vectorsearch/base.rb
|
148
188
|
- lib/vectorsearch/milvus.rb
|
149
189
|
- lib/vectorsearch/pinecone.rb
|