langchainrb 0.4.0 → 0.4.2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: ce8728ec2208577809174e154642db161121cb9dd49e0ec5d190d080e68b1d78
4
- data.tar.gz: bb0e0ccc4558ca849549f495a4adfacc5f7851c786869974afdaef29f0cde3ca
3
+ metadata.gz: e6e84f50b6e12bd94f5fa8de956549537f5d34b8a901bc6af3fbc5d392fc2e0a
4
+ data.tar.gz: '08c01f481d64b0c35f7e86491d1115d975497c8561f50408516fad388f084c3e'
5
5
  SHA512:
6
- metadata.gz: b2b4c27e31d730563aeca70a0aa3c4cf129e69773e34f397ba057faa8298a4368c1b9f66f925188f867f1feb47b4e07f77df702fa7c6cb76ad1e1a8464b895f6
7
- data.tar.gz: 55dd3fbc21e2cdf9bd84afcd6bb4de0f72c960dec0c6b1d2efff1f9492b3d5c7399f2d14c323597045e64eafb6f2f20992348d640317c64721fb0556f8a64126
6
+ metadata.gz: 8a1d29180f3c0cf89307413bc99e22accc6875d458b3ae12ea72d30146cf5ff172fba7047fe00b385d324057638115254c8dcc6f01459a75f96dccb9a99a301b
7
+ data.tar.gz: 070a0b6836cdb7dd356c99186964f61926498d5f585910bbb30449d4b2a12d50797797dc34c6fe3eb6a4e64c156230d23606c2ac0cf131eeccc942c6231ab3c9
data/CHANGELOG.md CHANGED
@@ -1,5 +1,16 @@
1
1
  ## [Unreleased]
2
2
 
3
+ ## [0.4.2] - 2023-06-03
4
+ - 🗣️ LLMs
5
+ - Introducing support for AI21
6
+ - Better docs generation
7
+ - Refactors
8
+
9
+ ## [0.4.1] - 2023-06-02
10
+ - Beautiful colored log messages
11
+ - 🛠️ Tools
12
+ - Introducing `Langchain::Tool::RubyCodeInterpreter`, a tool that executes sandboxed Ruby code
13
+
3
14
  ## [0.4.0] - 2023-06-01
4
15
  - [BREAKING] Everything is namespaced under `Langchain::` now
5
16
  - Pgvector similarity search uses the cosine distance by default now
data/Gemfile.lock CHANGED
@@ -1,7 +1,8 @@
1
1
  PATH
2
2
  remote: .
3
3
  specs:
4
- langchainrb (0.4.0)
4
+ langchainrb (0.4.2)
5
+ colorize (~> 0.8.1)
5
6
  tiktoken_ruby (~> 0.0.5)
6
7
 
7
8
  GEM
@@ -29,9 +30,11 @@ GEM
29
30
  addressable (2.8.4)
30
31
  public_suffix (>= 2.0.2, < 6.0)
31
32
  afm (0.2.2)
33
+ ai21 (0.2.0)
32
34
  ast (2.4.2)
33
35
  builder (3.2.4)
34
36
  byebug (11.1.3)
37
+ childprocess (4.1.0)
35
38
  chroma-db (0.3.0)
36
39
  dry-monads (~> 1.6)
37
40
  ruby-next-core (>= 0.15.0)
@@ -39,6 +42,7 @@ GEM
39
42
  cohere-ruby (0.9.4)
40
43
  faraday (>= 1.0.0)
41
44
  faraday_middleware (>= 1.0.0)
45
+ colorize (0.8.1)
42
46
  concurrent-ruby (1.2.2)
43
47
  crass (1.0.6)
44
48
  diff-lcs (1.5.0)
@@ -207,6 +211,7 @@ GEM
207
211
  rainbow (3.1.1)
208
212
  rake (13.0.6)
209
213
  rb_sys (0.9.78)
214
+ rdiscount (2.2.7)
210
215
  regexp_parser (2.8.0)
211
216
  replicate-ruby (0.2.2)
212
217
  addressable
@@ -250,6 +255,8 @@ GEM
250
255
  ruby-rc4 (0.1.5)
251
256
  ruby2_keywords (0.0.5)
252
257
  rubyzip (2.3.2)
258
+ safe_ruby (1.0.4)
259
+ childprocess (>= 0.3.9)
253
260
  standard (1.28.2)
254
261
  language_server-protocol (~> 3.17.0.2)
255
262
  lint_roller (~> 1.0)
@@ -281,6 +288,7 @@ GEM
281
288
  graphlient (~> 0.6.0)
282
289
  wikipedia-client (1.17.0)
283
290
  addressable (~> 2.7)
291
+ yard (0.9.34)
284
292
  zeitwerk (2.6.8)
285
293
 
286
294
  PLATFORMS
@@ -292,6 +300,7 @@ PLATFORMS
292
300
  x86_64-linux
293
301
 
294
302
  DEPENDENCIES
303
+ ai21 (~> 0.2.0)
295
304
  chroma-db (~> 0.3.0)
296
305
  cohere-ruby (~> 0.9.4)
297
306
  docx (~> 0.8.0)
@@ -310,12 +319,15 @@ DEPENDENCIES
310
319
  pry-byebug (~> 3.10.0)
311
320
  qdrant-ruby (~> 0.9.0)
312
321
  rake (~> 13.0)
322
+ rdiscount
313
323
  replicate-ruby (~> 0.2.2)
314
324
  rspec (~> 3.0)
315
325
  ruby-openai (~> 4.0.0)
326
+ safe_ruby (~> 1.0.4)
316
327
  standardrb
317
328
  weaviate-ruby (~> 0.8.0)
318
329
  wikipedia-client (~> 1.17.0)
330
+ yard
319
331
 
320
332
  BUNDLED WITH
321
333
  2.4.0
data/README.md CHANGED
@@ -109,6 +109,10 @@ Add `gem "ruby-openai", "~> 4.0.0"` to your Gemfile.
109
109
  ```ruby
110
110
  openai = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
111
111
  ```
112
+ You can pass additional parameters to the constructor; they will be passed through to the OpenAI client:
113
+ ```ruby
114
+ openai = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"], llm_options: {uri_base: "http://localhost:1234"})
115
+ ```
112
116
  ```ruby
113
117
  openai.embed(text: "foo bar")
114
118
  ```
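As a quick sketch, the same pass-through works for any option accepted by `OpenAI::Client.new` (the local `uri_base` below mirrors the example above and is purely illustrative):
```ruby
openai = Langchain::LLM::OpenAI.new(
  api_key: ENV["OPENAI_API_KEY"],
  llm_options: {uri_base: "http://localhost:1234"} # e.g. a local OpenAI-compatible server
)

openai.embed(text: "foo bar")
openai.complete(prompt: "Write a haiku about Ruby")
```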
@@ -260,10 +264,10 @@ agent.run(question: "How many full soccer fields would be needed to cover the di
260
264
  | Name | Description | ENV Requirements | Gem Requirements |
261
265
  | ------------ | :------------------------------------------------: | :-----------------------------------------------------------: | :---------------------------------------: |
262
266
  | "calculator" | Useful for getting the result of a math expression | | `gem "eqn", "~> 1.6.5"` |
263
- | "search" | A wrapper around Google Search | `ENV["SERPAPI_API_KEY"]` (https://serpapi.com/manage-api-key) | `gem "google_search_results", "~> 2.0.0"` | |
267
+ | "ruby_code_interpreter" | Interprets Ruby expressions | | `gem "safe_ruby", "~> 1.0.4"` |
268
+ | "search" | A wrapper around Google Search | `ENV["SERPAPI_API_KEY"]` (https://serpapi.com/manage-api-key) | `gem "google_search_results", "~> 2.0.0"` |
264
269
  | "wikipedia" | Calls Wikipedia API to retrieve the summary | | `gem "wikipedia-client", "~> 1.17.0"` |
265
270
 
266
-
267
271
  #### Loaders 🚚
268
272
 
269
273
  Need to read data from various sources? Load it up.
@@ -300,7 +304,7 @@ Additional examples available: [/examples](https://github.com/andreibondarev/lan
300
304
 
301
305
  ## Logging
302
306
 
303
- LangChain.rb uses standard logging mechanisms and defaults to `:debug` level. Most messages are at info level, but we will add debug or warn statements as needed.
307
+ LangChain.rb uses standard logging mechanisms and defaults to `:warn` level. Most messages are at info level, but we will add debug or warn statements as needed.
304
308
  To show all log messages:
305
309
 
306
310
  ```ruby
@@ -314,6 +318,9 @@ Langchain.logger.level = :info
314
318
  3. `bundle exec rake` to ensure that the tests pass and to run standardrb
315
319
  4. `bin/console` to load the gem in a REPL session. Feel free to add your own instances of LLMs, Tools, Agents, etc. and experiment with them.
316
320
 
321
+ ## Community
322
+ Join us in the [Ruby AI Builders](https://discord.gg/SBmjAnKT) Discord community in #langchainrb
323
+
317
324
  ## Core Contributors
318
325
  [<img style="border-radius:50%" alt="Andrei Bondarev" src="https://avatars.githubusercontent.com/u/541665?v=4" width="80" height="80" class="avatar">](https://github.com/andreibondarev)
319
326
 
data/Rakefile CHANGED
@@ -3,6 +3,7 @@
3
3
  require "bundler/gem_tasks"
4
4
  require "rspec/core/rake_task"
5
5
  require "standard/rake"
6
+ require "yard"
6
7
 
7
8
  RSpec::Core::RakeTask.new(:spec)
8
9
 
@@ -11,3 +12,7 @@ task default: :spec
11
12
  Rake::Task["spec"].enhance do
12
13
  Rake::Task["standard:fix"].invoke
13
14
  end
15
+
16
+ YARD::Rake::YardocTask.new do |t|
17
+ t.options = ["--fail-on-warning"]
18
+ end
@@ -42,7 +42,7 @@ module Langchain::Agent
42
42
  )
43
43
 
44
44
  loop do
45
- Langchain.logger.info("Agent: Passing the prompt to the #{llm} LLM")
45
+ Langchain.logger.info("[#{self.class.name}]".red + ": Sending the prompt to the #{llm} LLM")
46
46
  response = llm_client.complete(
47
47
  prompt: prompt,
48
48
  stop_sequences: ["Observation:"],
@@ -59,12 +59,11 @@ module Langchain::Agent
59
59
  # Find the input to the action in the "Action Input: [action_input]" format
60
60
  action_input = response.match(/Action Input: "?(.*)"?/)&.send(:[], -1)
61
61
 
62
- Langchain.logger.info("Agent: Using the \"#{action}\" Tool with \"#{action_input}\"")
63
-
64
62
  # Retrieve the Tool::[ToolName] class and call `execute`` with action_input as the input
65
- result = Langchain::Tool
66
- .const_get(Langchain::Tool::Base::TOOLS[action.strip])
67
- .execute(input: action_input)
63
+ tool = Langchain::Tool.const_get(Langchain::Tool::Base::TOOLS[action.strip])
64
+ Langchain.logger.info("[#{self.class.name}]".red + ": Invoking \"#{tool}\" Tool with \"#{action_input}\"")
65
+
66
+ result = tool.execute(input: action_input)
68
67
 
69
68
  # Append the Observation to the prompt
70
69
  prompt += if prompt.end_with?("Observation:")
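A minimal sketch of the reordered dispatch above — the tool class is resolved first so its name can be logged, then executed. The `"calculator"` key and its input are hypothetical stand-ins for what the agent parses out of the LLM response, and the corresponding tool gem must be installed:
```ruby
action = "calculator"      # hypothetical parsed "Action:" value
action_input = "2 + 2"     # hypothetical parsed "Action Input:" value

# Resolve the tool class via the TOOLS registry, log it, then execute it.
tool = Langchain::Tool.const_get(Langchain::Tool::Base::TOOLS[action.strip])
Langchain.logger.info("[Langchain::Agent::ChainOfThoughtAgent]" + ": Invoking \"#{tool}\" Tool with \"#{action_input}\"")
result = tool.execute(input: action_input)
```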
@@ -0,0 +1,34 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Langchain
4
+ module DependencyHelper
5
+ class VersionError < ScriptError; end
6
+
7
+ # This method requires and loads the given gem, and then checks to see if the version of the gem meets the requirements listed in `langchain.gemspec`
8
+ # This solution was built to avoid auto-loading every single gem in the Gemfile when the developer will most likely be using only a few of them.
9
+ #
10
+ # @param gem_name [String] The name of the gem to load
11
+ # @return [Boolean] Whether or not the gem was loaded successfully
12
+ # @raise [LoadError] If the gem is not installed
13
+ # @raise [VersionError] If the gem is installed, but the version does not meet the requirements
14
+ #
15
+ def depends_on(gem_name)
16
+ gem(gem_name) # require the gem
17
+
18
+ return(true) unless defined?(Bundler) # If we're in a non-bundler environment, we're no longer able to determine if we'll meet requirements
19
+
20
+ gem_version = Gem.loaded_specs[gem_name].version
21
+ gem_requirement = Bundler.load.dependencies.find { |g| g.name == gem_name }&.requirement
22
+
23
+ raise LoadError unless gem_requirement
24
+
25
+ unless gem_requirement.satisfied_by?(gem_version)
26
+ raise VersionError, "The #{gem_name} gem is installed, but version #{gem_requirement} is required. You have #{gem_version}."
27
+ end
28
+
29
+ true
30
+ rescue LoadError
31
+ raise LoadError, "Could not load #{gem_name}. Please ensure that the #{gem_name} gem is installed."
32
+ end
33
+ end
34
+ end
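For illustration, a consumer of the new helper follows the same pattern used by the LLM, Tool and Vectorsearch base classes later in this diff (the class name here is hypothetical; `ai21` is one of the gems declared in the gemspec):
```ruby
class MyAi21Wrapper
  include Langchain::DependencyHelper

  def initialize
    depends_on "ai21"   # LoadError if the gem is missing, VersionError if the wrong version is installed
    require "ai21"
  end
end
```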
@@ -0,0 +1,45 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Langchain::LLM
4
+ class AI21 < Base
5
+ #
6
+ # Wrapper around AI21 Studio APIs.
7
+ #
8
+ # Gem requirements: gem "ai21", "~> 0.2.0"
9
+ #
10
+ # Usage:
11
+ # ai21 = Langchain::LLM::AI21.new(api_key:)
12
+ #
13
+
14
+ def initialize(api_key:)
15
+ depends_on "ai21"
16
+ require "ai21"
17
+
18
+ @client = ::AI21::Client.new(api_key)
19
+ end
20
+
21
+ #
22
+ # Generate a completion for a given prompt
23
+ #
24
+ # @param prompt [String] The prompt to generate a completion for
25
+ # @param params [Hash] The parameters to pass to the API
26
+ # @return [String] The completion
27
+ #
28
+ def complete(prompt:, **params)
29
+ response = client.complete(prompt, params)
30
+ response.dig(:completions, 0, :data, :text)
31
+ end
32
+
33
+ #
34
+ # Generate a summary for a given text
35
+ #
36
+ # @param text [String] The text to generate a summary for
37
+ # @param params [Hash] The parameters to pass to the API
38
+ # @return [String] The summary
39
+ #
40
+ def summarize(text:, **params)
41
+ response = client.summarize(text, "TEXT", params)
42
+ response.dig(:summary)
43
+ end
44
+ end
45
+ end
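A usage sketch for the new wrapper (the environment variable name is illustrative; extra keyword arguments are forwarded to the AI21 client as shown in `#complete` and `#summarize` above):
```ruby
ai21 = Langchain::LLM::AI21.new(api_key: ENV["AI21_API_KEY"])

ai21.complete(prompt: "What is the capital of France?")
ai21.summarize(text: "Ruby is a dynamic, open source programming language with a focus on simplicity and productivity.")
```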
@@ -2,10 +2,11 @@
2
2
 
3
3
  module Langchain::LLM
4
4
  class Base
5
+ include Langchain::DependencyHelper
6
+
5
7
  attr_reader :client
6
8
 
7
9
  # Currently supported LLMs
8
- # TODO: Add support for HuggingFace and other LLMs
9
10
  LLMS = {
10
11
  cohere: "Cohere",
11
12
  google_palm: "GooglePalm",
@@ -2,6 +2,15 @@
2
2
 
3
3
  module Langchain::LLM
4
4
  class Cohere < Base
5
+ #
6
+ # Wrapper around the Cohere API.
7
+ #
8
+ # Gem requirements: gem "cohere-ruby", "~> 0.9.4"
9
+ #
10
+ # Usage:
11
+ # cohere = Langchain::LLM::Cohere.new(api_key: "YOUR_API_KEY")
12
+ #
13
+
5
14
  DEFAULTS = {
6
15
  temperature: 0.0,
7
16
  completion_model_name: "base",
@@ -2,7 +2,14 @@
2
2
 
3
3
  module Langchain::LLM
4
4
  class GooglePalm < Base
5
+ #
5
6
  # Wrapper around the Google PaLM (Pathways Language Model) APIs.
7
+ #
8
+ # Gem requirements: gem "google_palm_api", "~> 0.1.0"
9
+ #
10
+ # Usage:
11
+ # google_palm = Langchain::LLM::GooglePalm.new(api_key: "YOUR_API_KEY")
12
+ #
6
13
 
7
14
  DEFAULTS = {
8
15
  temperature: 0.0,
@@ -2,6 +2,15 @@
2
2
 
3
3
  module Langchain::LLM
4
4
  class HuggingFace < Base
5
+ #
6
+ # Wrapper around the HuggingFace Inference API.
7
+ #
8
+ # Gem requirements: gem "hugging-face", "~> 0.3.4"
9
+ #
10
+ # Usage:
11
+ # hf = Langchain::LLM::HuggingFace.new(api_key: "YOUR_API_KEY")
12
+ #
13
+
5
14
  # The gem does not currently accept other models:
6
15
  # https://github.com/alchaplinsky/hugging-face/blob/main/lib/hugging_face/inference_api.rb#L32-L34
7
16
  DEFAULTS = {
@@ -2,6 +2,15 @@
2
2
 
3
3
  module Langchain::LLM
4
4
  class OpenAI < Base
5
+ #
6
+ # Wrapper around OpenAI APIs.
7
+ #
8
+ # Gem requirements: gem "ruby-openai", "~> 4.0.0"
9
+ #
10
+ # Usage:
11
+ # openai = Langchain::LLM::OpenAI.new(api_key:, llm_options: {})
12
+ #
13
+
5
14
  DEFAULTS = {
6
15
  temperature: 0.0,
7
16
  completion_model_name: "text-davinci-003",
@@ -10,12 +19,11 @@ module Langchain::LLM
10
19
  dimension: 1536
11
20
  }.freeze
12
21
 
13
- def initialize(api_key:)
22
+ def initialize(api_key:, llm_options: {})
14
23
  depends_on "ruby-openai"
15
24
  require "openai"
16
25
 
17
- # TODO: Add support to pass `organization_id:`
18
- @client = ::OpenAI::Client.new(access_token: api_key)
26
+ @client = ::OpenAI::Client.new(access_token: api_key, **llm_options)
19
27
  end
20
28
 
21
29
  #
@@ -24,17 +32,12 @@ module Langchain::LLM
24
32
  # @param text [String] The text to generate an embedding for
25
33
  # @return [Array] The embedding
26
34
  #
27
- def embed(text:)
28
- model = DEFAULTS[:embeddings_model_name]
35
+ def embed(text:, **params)
36
+ parameters = {model: DEFAULTS[:embeddings_model_name], input: text}
29
37
 
30
- Langchain::Utils::TokenLengthValidator.validate!(text, model)
38
+ Langchain::Utils::TokenLengthValidator.validate!(text, parameters[:model])
31
39
 
32
- response = client.embeddings(
33
- parameters: {
34
- model: model,
35
- input: text
36
- }
37
- )
40
+ response = client.embeddings(parameters: parameters.merge(params))
38
41
  response.dig("data").first.dig("embedding")
39
42
  end
40
43
 
@@ -45,23 +48,13 @@ module Langchain::LLM
45
48
  # @return [String] The completion
46
49
  #
47
50
  def complete(prompt:, **params)
48
- model = DEFAULTS[:completion_model_name]
49
-
50
- Langchain::Utils::TokenLengthValidator.validate!(prompt, model)
51
-
52
- default_params = {
53
- model: model,
54
- temperature: DEFAULTS[:temperature],
55
- prompt: prompt
56
- }
51
+ parameters = compose_parameters DEFAULTS[:completion_model_name], params
57
52
 
58
- if params[:stop_sequences]
59
- default_params[:stop] = params.delete(:stop_sequences)
60
- end
53
+ Langchain::Utils::TokenLengthValidator.validate!(prompt, parameters[:model])
61
54
 
62
- default_params.merge!(params)
55
+ parameters[:prompt] = prompt
63
56
 
64
- response = client.completions(parameters: default_params)
57
+ response = client.completions(parameters: parameters)
65
58
  response.dig("choices", 0, "text")
66
59
  end
67
60
 
@@ -72,24 +65,13 @@ module Langchain::LLM
72
65
  # @return [String] The chat completion
73
66
  #
74
67
  def chat(prompt:, **params)
75
- model = DEFAULTS[:chat_completion_model_name]
68
+ parameters = compose_parameters DEFAULTS[:chat_completion_model_name], params
76
69
 
77
- Langchain::Utils::TokenLengthValidator.validate!(prompt, model)
70
+ Langchain::Utils::TokenLengthValidator.validate!(prompt, parameters[:model])
78
71
 
79
- default_params = {
80
- model: model,
81
- temperature: DEFAULTS[:temperature],
82
- # TODO: Figure out how to introduce persisted conversations
83
- messages: [{role: "user", content: prompt}]
84
- }
85
-
86
- if params[:stop_sequences]
87
- default_params[:stop] = params.delete(:stop_sequences)
88
- end
72
+ parameters[:messages] = [{role: "user", content: prompt}]
89
73
 
90
- default_params.merge!(params)
91
-
92
- response = client.chat(parameters: default_params)
74
+ response = client.chat(parameters: parameters)
93
75
  response.dig("choices", 0, "message", "content")
94
76
  end
95
77
 
@@ -112,5 +94,15 @@ module Langchain::LLM
112
94
  max_tokens: 2048
113
95
  )
114
96
  end
97
+
98
+ private
99
+
100
+ def compose_parameters(model, params)
101
+ default_params = {model: model, temperature: DEFAULTS[:temperature]}
102
+
103
+ default_params[:stop] = params.delete(:stop_sequences) if params[:stop_sequences]
104
+
105
+ default_params.merge(params)
106
+ end
115
107
  end
116
108
  end
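The effect of `compose_parameters` is easiest to see from the caller's side; a sketch with illustrative values:
```ruby
openai = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])

# stop_sequences is renamed to the OpenAI "stop" parameter; any other keyword
# overrides the DEFAULTS entry of the same name before the request is sent.
openai.complete(
  prompt: "Q: What is 2 + 2?\nA:",
  stop_sequences: ["\n"],
  temperature: 0.2
)
```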
@@ -2,7 +2,11 @@
2
2
 
3
3
  module Langchain::LLM
4
4
  class Replicate < Base
5
+ #
5
6
  # Wrapper around Replicate.com LLM provider
7
+ #
8
+ # Gem requirements: gem "replicate-ruby", "~> 0.2.2"
9
+ #
6
10
  # Use it directly:
7
11
  # replicate = LLM::Replicate.new(api_key: ENV["REPLICATE_API_KEY"])
8
12
  #
@@ -3,6 +3,8 @@
3
3
  module Langchain
4
4
  module Processors
5
5
  class Base
6
+ include Langchain::DependencyHelper
7
+
6
8
  EXTENSIONS = []
7
9
  CONTENT_TYPES = []
8
10
 
@@ -64,9 +64,9 @@ module Langchain::Prompt
64
64
  #
65
65
  # This method takes a template string and returns an array of input variable names
66
66
  # contained within the template. Input variables are defined as text enclosed in
67
- # curly braces (e.g. "{variable_name}").
67
+ # curly braces (e.g. <code>\{variable_name\}</code>).
68
68
  #
69
- # Content within two consecutive curly braces (e.g. "{{ignore_me}}) are ignored.
69
+ # Content within two consecutive curly braces (e.g. <code>\{\{ignore_me}}</code>) are ignored.
70
70
  #
71
71
  # @param template [String] The template string to extract variables from.
72
72
  #
@@ -2,6 +2,7 @@
2
2
 
3
3
  require "strscan"
4
4
  require "pathname"
5
+ require "json"
5
6
 
6
7
  module Langchain::Prompt
7
8
  TYPE_TO_LOADER = {
@@ -20,7 +20,7 @@ module Langchain::Prompt
20
20
  end
21
21
 
22
22
  #
23
- # Format the prompt with the inputs. Double {{}} replaced with single {} to adhere to f-string spec.
23
+ # Format the prompt with the inputs. Double <code>{{}}</code> replaced with single <code>{}</code> to adhere to f-string spec.
24
24
  #
25
25
  # @param kwargs [Hash] Any arguments to be passed to the prompt template.
26
26
  # @return [String] A formatted string.
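A short sketch of the two conventions documented above, assuming the existing `Langchain::Prompt::PromptTemplate` API:
```ruby
prompt = Langchain::Prompt::PromptTemplate.new(
  template: "Tell me a {adjective} joke about {{subject}}.",
  input_variables: ["adjective"]
)

# {adjective} is substituted; the doubled braces collapse to a literal {subject}.
prompt.format(adjective: "funny")
# => "Tell me a funny joke about {subject}."
```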
@@ -2,6 +2,8 @@
2
2
 
3
3
  module Langchain::Tool
4
4
  class Base
5
+ include Langchain::DependencyHelper
6
+
5
7
  # How to add additional Tools?
6
8
  # 1. Create a new file in lib/tool/your_tool_name.rb
7
9
  # 2. Add your tool to the TOOLS hash below
@@ -2,6 +2,15 @@
2
2
 
3
3
  module Langchain::Tool
4
4
  class Calculator < Base
5
+ #
6
+ # A calculator tool that falls back to the Google calculator widget
7
+ #
8
+ # Gem requirements:
9
+ # gem "eqn", "~> 1.6.5"
10
+ # gem "google_search_results", "~> 2.0.0"
11
+ # ENV requirements: ENV["SERPAPI_API_KEY"]
12
+ #
13
+
5
14
  description <<~DESC
6
15
  Useful for getting the result of a math expression.
7
16
 
@@ -18,6 +27,8 @@ module Langchain::Tool
18
27
  # @param input [String] math expression
19
28
  # @return [String] Answer
20
29
  def execute(input:)
30
+ Langchain.logger.info("[#{self.class.name}]".light_blue + ": Executing \"#{input}\"")
31
+
21
32
  Eqn::Calculator.calc(input)
22
33
  rescue Eqn::ParseError, Eqn::NoVariableValueError
23
34
  # Sometimes the input is not a pure math expression, e.g: "12F in Celsius"
@@ -0,0 +1,33 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Langchain::Tool
4
+ class RubyCodeInterpreter < Base
5
+ #
6
+ # A tool that executes Ruby code in a sandboxed environment.
7
+ #
8
+ # Gem requirements: gem "safe_ruby", "~> 1.0.4"
9
+ #
10
+
11
+ description <<~DESC
12
+ A Ruby code interpreter. Use this to execute ruby expressions. Input should be a valid ruby expression. If you want to see the output of the tool, make sure to return a value.
13
+ DESC
14
+
15
+ def initialize(timeout: 30)
16
+ @timeout = timeout
17
+ depends_on "safe_ruby"
18
+ require "safe_ruby"
19
+ end
20
+
21
+ # @param input [String] ruby code expression
22
+ # @return [String] Answer
23
+ def execute(input:)
24
+ Langchain.logger.info("[#{self.class.name}]".light_blue + ": Executing \"#{input}\"")
25
+
26
+ safe_eval(input)
27
+ end
28
+
29
+ def safe_eval(code)
30
+ SafeRuby.eval(code, timeout: @timeout)
31
+ end
32
+ end
33
+ end
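A usage sketch for the new tool (requires the `safe_ruby` gem; the return value is shown for illustration):
```ruby
interpreter = Langchain::Tool::RubyCodeInterpreter.new(timeout: 10)

# The expression is evaluated by SafeRuby in a sandboxed subprocess.
interpreter.execute(input: "[1, 2, 3].sum * 2")
# => 12
```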
@@ -2,8 +2,12 @@
2
2
 
3
3
  module Langchain::Tool
4
4
  class SerpApi < Base
5
+ #
5
6
  # Wrapper around SerpAPI
6
- # Set ENV["SERPAPI_API_KEY"] to use it
7
+ #
8
+ # Gem requirements: gem "google_search_results", "~> 2.0.0"
9
+ # ENV requirements: ENV["SERPAPI_API_KEY"] # https://serpapi.com/manage-api-key
10
+ #
7
11
 
8
12
  description <<~DESC
9
13
  A wrapper around Google Search.
@@ -33,6 +37,8 @@ module Langchain::Tool
33
37
  # TODO: Glance at all of the fields that langchain Python looks through: https://github.com/hwchase17/langchain/blob/v0.0.166/langchain/utilities/serpapi.py#L128-L156
34
38
  # We may need to do the same thing here.
35
39
  def execute(input:)
40
+ Langchain.logger.info("[#{self.class.name}]".light_blue + ": Executing \"#{input}\"")
41
+
36
42
  hash_results = execute_search(input: input)
37
43
 
38
44
  hash_results.dig(:answer_box, :answer) ||
@@ -2,7 +2,11 @@
2
2
 
3
3
  module Langchain::Tool
4
4
  class Wikipedia < Base
5
+ #
5
6
  # Tool that adds the capability to search using the Wikipedia API
7
+ #
8
+ # Gem requirements: gem "wikipedia-client", "~> 1.17.0"
9
+ #
6
10
 
7
11
  description <<~DESC
8
12
  A wrapper around Wikipedia.
@@ -22,6 +26,8 @@ module Langchain::Tool
22
26
  # @param input [String] search query
23
27
  # @return [String] Answer
24
28
  def execute(input:)
29
+ Langchain.logger.info("[#{self.class.name}]".light_blue + ": Executing \"#{input}\"")
30
+
25
31
  page = ::Wikipedia.find(input)
26
32
  # It would be nice to figure out a way to provide page.content but the LLM token limit is an issue
27
33
  page.summary
@@ -4,6 +4,7 @@ require "forwardable"
4
4
 
5
5
  module Langchain::Vectorsearch
6
6
  class Base
7
+ include Langchain::DependencyHelper
7
8
  extend Forwardable
8
9
 
9
10
  attr_reader :client, :index_name, :llm, :llm_api_key, :llm_client
@@ -2,6 +2,15 @@
2
2
 
3
3
  module Langchain::Vectorsearch
4
4
  class Chroma < Base
5
+ #
6
+ # Wrapper around Chroma DB
7
+ #
8
+ # Gem requirements: gem "chroma-db", "~> 0.3.0"
9
+ #
10
+ # Usage:
11
+ # chroma = Langchain::Vectorsearch::Chroma.new(url:, index_name:, llm:, llm_api_key:, api_key: nil)
12
+ #
13
+
5
14
  # Initialize the Chroma client
6
15
  # @param url [String] The URL of the Qdrant server
7
16
  # @param api_key [String] The API key to use
@@ -2,6 +2,15 @@
2
2
 
3
3
  module Langchain::Vectorsearch
4
4
  class Milvus < Base
5
+ #
6
+ # Wrapper around Milvus REST APIs.
7
+ #
8
+ # Gem requirements: gem "milvus", "~> 0.9.0"
9
+ #
10
+ # Usage:
11
+ # milvus = Langchain::Vectorsearch::Milvus.new(url:, index_name:, llm:, llm_api_key:)
12
+ #
13
+
5
14
  def initialize(url:, index_name:, llm:, llm_api_key:, api_key: nil)
6
15
  depends_on "milvus"
7
16
  require "milvus"
@@ -1,8 +1,16 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module Langchain::Vectorsearch
4
- # The PostgreSQL vector search adapter
5
4
  class Pgvector < Base
5
+ #
6
+ # The PostgreSQL vector search adapter
7
+ #
8
+ # Gem requirements: gem "pgvector", "~> 0.2"
9
+ #
10
+ # Usage:
11
+ # pgvector = Langchain::Vectorsearch::Pgvector.new(url:, index_name:, llm:, llm_api_key:)
12
+ #
13
+
6
14
  # The operators supported by the PostgreSQL vector search adapter
7
15
  OPERATORS = {
8
16
  "cosine_distance" => "<=>",
@@ -2,6 +2,15 @@
2
2
 
3
3
  module Langchain::Vectorsearch
4
4
  class Pinecone < Base
5
+ #
6
+ # Wrapper around Pinecone API.
7
+ #
8
+ # Gem requirements: gem "pinecone", "~> 0.1.6"
9
+ #
10
+ # Usage:
11
+ # pinecone = Langchain::Vectorsearch::Pinecone.new(environment:, api_key:, index_name:, llm:, llm_api_key:)
12
+ #
13
+
5
14
  # Initialize the Pinecone client
6
15
  # @param environment [String] The environment to use
7
16
  # @param api_key [String] The API key to use
@@ -2,6 +2,15 @@
2
2
 
3
3
  module Langchain::Vectorsearch
4
4
  class Qdrant < Base
5
+ #
6
+ # Wrapper around Qdrant
7
+ #
8
+ # Gem requirements: gem "qdrant-ruby", "~> 0.9.0"
9
+ #
10
+ # Usage:
11
+ # qdrant = Langchain::Vectorsearch::Qdrant.new(url:, api_key:, index_name:, llm:, llm_api_key:)
12
+ #
13
+
5
14
  # Initialize the Qdrant client
6
15
  # @param url [String] The URL of the Qdrant server
7
16
  # @param api_key [String] The API key to use
@@ -2,6 +2,15 @@
2
2
 
3
3
  module Langchain::Vectorsearch
4
4
  class Weaviate < Base
5
+ #
6
+ # Wrapper around Weaviate
7
+ #
8
+ # Gem requirements: gem "weaviate-ruby", "~> 0.8.0"
9
+ #
10
+ # Usage:
11
+ # weaviate = Langchain::Vectorsearch::Weaviate.new(url:, api_key:, index_name:, llm:, llm_api_key:)
12
+ #
13
+
5
14
  # Initialize the Weaviate adapter
6
15
  # @param url [String] The URL of the Weaviate instance
7
16
  # @param api_key [String] The API key to use
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module Langchain
4
- VERSION = "0.4.0"
4
+ VERSION = "0.4.2"
5
5
  end
data/lib/langchain.rb CHANGED
@@ -2,9 +2,9 @@
2
2
 
3
3
  require "logger"
4
4
  require "pathname"
5
+ require "colorize"
5
6
 
6
- require_relative "./version"
7
- require_relative "./dependency_helper"
7
+ require_relative "./langchain/version"
8
8
 
9
9
  module Langchain
10
10
  class << self
@@ -13,12 +13,13 @@ module Langchain
13
13
  attr_reader :root
14
14
  end
15
15
 
16
- @logger ||= ::Logger.new($stdout, level: :warn, formatter: ->(severity, datetime, progname, msg) { "[LangChain.rb] #{msg}\n" })
16
+ @logger ||= ::Logger.new($stdout, level: :warn, formatter: ->(severity, datetime, progname, msg) { "[LangChain.rb]".yellow + " #{msg}\n" })
17
17
 
18
18
  @root = Pathname.new(__dir__)
19
19
 
20
20
  autoload :Loader, "langchain/loader"
21
21
  autoload :Data, "langchain/data"
22
+ autoload :DependencyHelper, "langchain/dependency_helper"
22
23
 
23
24
  module Agent
24
25
  autoload :Base, "langchain/agent/base"
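Tying this to the README change above: the default logger writes colorized `[LangChain.rb]` messages to `$stdout` at the `:warn` level, so setting the level to `:info` is enough to see the new per-class agent and tool messages:
```ruby
# Grounded in the README: surface the info-level messages added in 0.4.1/0.4.2.
Langchain.logger.level = :info
```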
@@ -28,6 +29,7 @@ module Langchain
28
29
  module Tool
29
30
  autoload :Base, "langchain/tool/base"
30
31
  autoload :Calculator, "langchain/tool/calculator"
32
+ autoload :RubyCodeInterpreter, "langchain/tool/ruby_code_interpreter"
31
33
  autoload :SerpApi, "langchain/tool/serp_api"
32
34
  autoload :Wikipedia, "langchain/tool/wikipedia"
33
35
  end
@@ -58,6 +60,7 @@ module Langchain
58
60
  end
59
61
 
60
62
  module LLM
63
+ autoload :AI21, "langchain/llm/ai21"
61
64
  autoload :Base, "langchain/llm/base"
62
65
  autoload :Cohere, "langchain/llm/cohere"
63
66
  autoload :GooglePalm, "langchain/llm/google_palm"
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: langchainrb
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.4.0
4
+ version: 0.4.2
5
5
  platform: ruby
6
6
  authors:
7
7
  - Andrei Bondarev
8
8
  autorequire:
9
9
  bindir: exe
10
10
  cert_chain: []
11
- date: 2023-06-01 00:00:00.000000000 Z
11
+ date: 2023-06-03 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: tiktoken_ruby
@@ -24,6 +24,20 @@ dependencies:
24
24
  - - "~>"
25
25
  - !ruby/object:Gem::Version
26
26
  version: 0.0.5
27
+ - !ruby/object:Gem::Dependency
28
+ name: colorize
29
+ requirement: !ruby/object:Gem::Requirement
30
+ requirements:
31
+ - - "~>"
32
+ - !ruby/object:Gem::Version
33
+ version: 0.8.1
34
+ type: :runtime
35
+ prerelease: false
36
+ version_requirements: !ruby/object:Gem::Requirement
37
+ requirements:
38
+ - - "~>"
39
+ - !ruby/object:Gem::Version
40
+ version: 0.8.1
27
41
  - !ruby/object:Gem::Dependency
28
42
  name: dotenv-rails
29
43
  requirement: !ruby/object:Gem::Requirement
@@ -52,6 +66,48 @@ dependencies:
52
66
  - - "~>"
53
67
  - !ruby/object:Gem::Version
54
68
  version: 3.10.0
69
+ - !ruby/object:Gem::Dependency
70
+ name: yard
71
+ requirement: !ruby/object:Gem::Requirement
72
+ requirements:
73
+ - - ">="
74
+ - !ruby/object:Gem::Version
75
+ version: '0'
76
+ type: :development
77
+ prerelease: false
78
+ version_requirements: !ruby/object:Gem::Requirement
79
+ requirements:
80
+ - - ">="
81
+ - !ruby/object:Gem::Version
82
+ version: '0'
83
+ - !ruby/object:Gem::Dependency
84
+ name: rdiscount
85
+ requirement: !ruby/object:Gem::Requirement
86
+ requirements:
87
+ - - ">="
88
+ - !ruby/object:Gem::Version
89
+ version: '0'
90
+ type: :development
91
+ prerelease: false
92
+ version_requirements: !ruby/object:Gem::Requirement
93
+ requirements:
94
+ - - ">="
95
+ - !ruby/object:Gem::Version
96
+ version: '0'
97
+ - !ruby/object:Gem::Dependency
98
+ name: ai21
99
+ requirement: !ruby/object:Gem::Requirement
100
+ requirements:
101
+ - - "~>"
102
+ - !ruby/object:Gem::Version
103
+ version: 0.2.0
104
+ type: :development
105
+ prerelease: false
106
+ version_requirements: !ruby/object:Gem::Requirement
107
+ requirements:
108
+ - - "~>"
109
+ - !ruby/object:Gem::Version
110
+ version: 0.2.0
55
111
  - !ruby/object:Gem::Dependency
56
112
  name: chroma-db
57
113
  requirement: !ruby/object:Gem::Requirement
@@ -276,6 +332,20 @@ dependencies:
276
332
  - - "~>"
277
333
  - !ruby/object:Gem::Version
278
334
  version: 4.0.0
335
+ - !ruby/object:Gem::Dependency
336
+ name: safe_ruby
337
+ requirement: !ruby/object:Gem::Requirement
338
+ requirements:
339
+ - - "~>"
340
+ - !ruby/object:Gem::Version
341
+ version: 1.0.4
342
+ type: :development
343
+ prerelease: false
344
+ version_requirements: !ruby/object:Gem::Requirement
345
+ requirements:
346
+ - - "~>"
347
+ - !ruby/object:Gem::Version
348
+ version: 1.0.4
279
349
  - !ruby/object:Gem::Dependency
280
350
  name: weaviate-ruby
281
351
  requirement: !ruby/object:Gem::Requirement
@@ -325,12 +395,13 @@ files:
325
395
  - examples/store_and_query_with_pinecone.rb
326
396
  - examples/store_and_query_with_qdrant.rb
327
397
  - examples/store_and_query_with_weaviate.rb
328
- - lib/dependency_helper.rb
329
398
  - lib/langchain.rb
330
399
  - lib/langchain/agent/base.rb
331
400
  - lib/langchain/agent/chain_of_thought_agent/chain_of_thought_agent.rb
332
401
  - lib/langchain/agent/chain_of_thought_agent/chain_of_thought_agent_prompt.json
333
402
  - lib/langchain/data.rb
403
+ - lib/langchain/dependency_helper.rb
404
+ - lib/langchain/llm/ai21.rb
334
405
  - lib/langchain/llm/base.rb
335
406
  - lib/langchain/llm/cohere.rb
336
407
  - lib/langchain/llm/google_palm.rb
@@ -353,6 +424,7 @@ files:
353
424
  - lib/langchain/prompt/prompt_template.rb
354
425
  - lib/langchain/tool/base.rb
355
426
  - lib/langchain/tool/calculator.rb
427
+ - lib/langchain/tool/ruby_code_interpreter.rb
356
428
  - lib/langchain/tool/serp_api.rb
357
429
  - lib/langchain/tool/wikipedia.rb
358
430
  - lib/langchain/utils/token_length_validator.rb
@@ -363,8 +435,8 @@ files:
363
435
  - lib/langchain/vectorsearch/pinecone.rb
364
436
  - lib/langchain/vectorsearch/qdrant.rb
365
437
  - lib/langchain/vectorsearch/weaviate.rb
438
+ - lib/langchain/version.rb
366
439
  - lib/langchainrb.rb
367
- - lib/version.rb
368
440
  - sig/langchain.rbs
369
441
  homepage: https://rubygems.org/gems/langchainrb
370
442
  licenses:
@@ -1,30 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- VersionError = Class.new(ScriptError)
4
-
5
- # This method requires and loads the given gem, and then checks to see if the version of the gem meets the requirements listed in `langchain.gemspec`
6
- # This solution was built to avoid auto-loading every single gem in the Gemfile when the developer will mostly likely be only using a few of them.
7
- #
8
- # @param gem_name [String] The name of the gem to load
9
- # @return [Boolean] Whether or not the gem was loaded successfully
10
- # @raise [LoadError] If the gem is not installed
11
- # @raise [VersionError] If the gem is installed, but the version does not meet the requirements
12
- #
13
- def depends_on(gem_name)
14
- gem(gem_name) # require the gem
15
-
16
- return(true) unless defined?(Bundler) # If we're in a non-bundler environment, we're no longer able to determine if we'll meet requirements
17
-
18
- gem_version = Gem.loaded_specs[gem_name].version
19
- gem_requirement = Bundler.load.dependencies.find { |g| g.name == gem_name }&.requirement
20
-
21
- raise LoadError unless gem_requirement
22
-
23
- unless gem_requirement.satisfied_by?(gem_version)
24
- raise VersionError, "The #{gem_name} gem is installed, but version #{gem_requirement} is required. You have #{gem_version}."
25
- end
26
-
27
- true
28
- rescue LoadError
29
- raise LoadError, "Could not load #{gem_name}. Please ensure that the #{gem_name} gem is installed."
30
- end