langchainrb 0.3.2 → 0.3.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: e751a38f6a248db9aabfac3f1b9bc547f61304dd366be999005941045a5adcd6
-  data.tar.gz: 56db69c0a578bdf198bfc12cb53e92f0e1d15654a1dfa29715d5d7833550da33
+  metadata.gz: a5a782dd2282ab5dd4aed3f1d0e421a4f9b227fa4c5450ed27f2f98a86af74f4
+  data.tar.gz: b47bb5d6789d7abb81f56ee1beb0b52323184f578475aec3e92fcc19b4a1314a
 SHA512:
-  metadata.gz: bdd3060863a967b48a6123ea379d9a98632730a726b8da9924bb0061511b8bf88fa05f36c49ac4b8f136472e9aa204c4bd09929d31e0f2c0700ad16361a1a8cf
-  data.tar.gz: 88f04804d10f51d639b8643bd683b8a420920fc4ad2d26ef6c45f4fac034ec3f8b54787de51c4799f4a074029ad5649efca6e950b29fef62b96ce9452e5ec8cc
+  metadata.gz: 4e918b49b2b04a0e7009db732a5b24ab080a0d8d6b4c3be4084aa3a1492ddd6c1627d467b8bff4b34e1e5160b71622a75772ef92658dce2589b9542b8b0a8137
+  data.tar.gz: 3b465f1a05e614d64d416582aeaf7d998bf51422705331b220e08b00774d4beed4dcbc4e2a71b81bad87cddfe8c8f4c8db421d650e7161f16b26a1259922f71c
data/CHANGELOG.md CHANGED
@@ -1,5 +1,14 @@
 ## [Unreleased]
 
+## [0.3.4] - 2023-05-16
+- LLMs
+  - Introducing support for HuggingFace
+
+## [0.3.3] - 2023-05-16
+- Dependencies are now optionally loaded and required at runtime
+- Start using `standardrb` for linting
+- Use the Ruby logger
+
 ## [0.3.2] - 2023-05-15
 - Agents
   - Fix Chain of Thought prompt loader
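The 0.3.3 note about optionally loaded dependencies is the thread running through most hunks below: provider gems are no longer required when `langchain` boots, only when the matching client is instantiated. A minimal sketch of the observable behavior, assuming the `ruby-openai` gem is not installed:

```ruby
require "langchain" # loads no provider gems

# The require happens inside the constructor (see data/lib/llm/openai.rb below),
# so a missing gem surfaces only at this point:
LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
# => LoadError: Could not load ruby-openai. Please ensure that the ruby-openai gem is installed.
```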
data/Gemfile CHANGED
@@ -8,3 +8,5 @@ gemspec
 gem "rake", "~> 13.0"
 
 gem "rspec", "~> 3.0"
+
+gem "standardrb"
data/Gemfile.lock CHANGED
@@ -1,16 +1,7 @@
 PATH
   remote: .
   specs:
-    langchainrb (0.3.2)
-      cohere-ruby (~> 0.9.3)
-      eqn (~> 1.6.5)
-      google_search_results (~> 2.0.0)
-      milvus (~> 0.9.0)
-      pinecone (~> 0.1.6)
-      qdrant-ruby (~> 0.9.0)
-      ruby-openai (~> 4.0.0)
-      weaviate-ruby (~> 0.8.0)
-      wikipedia-client (~> 1.17.0)
+    langchainrb (0.3.4)
 
 GEM
   remote: https://rubygems.org/
@@ -35,6 +26,7 @@ GEM
       tzinfo (~> 2.0)
     addressable (2.8.4)
       public_suffix (>= 2.0.2, < 6.0)
+    ast (2.4.2)
     builder (3.2.4)
     byebug (11.1.3)
     coderay (1.1.3)
@@ -125,9 +117,14 @@ GEM
     httparty (0.21.0)
       mini_mime (>= 1.0.0)
       multi_xml (>= 0.5.2)
+    hugging-face (0.3.2)
+      faraday (~> 1.0)
     i18n (1.13.0)
       concurrent-ruby (~> 1.0)
     ice_nine (0.11.2)
+    json (2.6.3)
+    language_server-protocol (3.17.0.3)
+    lint_roller (1.0.0)
     loofah (2.21.1)
       crass (~> 1.0.2)
       nokogiri (>= 1.5.9)
@@ -138,10 +135,15 @@ GEM
     minitest (5.18.0)
     multi_xml (0.6.0)
     multipart-post (2.3.0)
+    nokogiri (1.14.3-arm64-darwin)
+      racc (~> 1.4)
     nokogiri (1.14.3-x86_64-darwin)
       racc (~> 1.4)
     nokogiri (1.14.3-x86_64-linux)
       racc (~> 1.4)
+    parallel (1.23.0)
+    parser (3.2.2.1)
+      ast (~> 2.4.1)
     pinecone (0.1.71)
       dry-struct (~> 1.6.0)
       dry-validation (~> 1.10.0)
@@ -173,7 +175,10 @@ GEM
       rake (>= 12.2)
       thor (~> 1.0)
       zeitwerk (~> 2.5)
+    rainbow (3.1.1)
     rake (13.0.6)
+    regexp_parser (2.8.0)
+    rexml (3.2.5)
     rspec (3.12.0)
       rspec-core (~> 3.12.0)
       rspec-expectations (~> 3.12.0)
@@ -187,15 +192,45 @@ GEM
       diff-lcs (>= 1.2.0, < 2.0)
       rspec-support (~> 3.12.0)
     rspec-support (3.12.0)
+    rubocop (1.50.2)
+      json (~> 2.3)
+      parallel (~> 1.10)
+      parser (>= 3.2.0.0)
+      rainbow (>= 2.2.2, < 4.0)
+      regexp_parser (>= 1.8, < 3.0)
+      rexml (>= 3.2.5, < 4.0)
+      rubocop-ast (>= 1.28.0, < 2.0)
+      ruby-progressbar (~> 1.7)
+      unicode-display_width (>= 2.4.0, < 3.0)
+    rubocop-ast (1.28.1)
+      parser (>= 3.2.1.0)
+    rubocop-performance (1.16.0)
+      rubocop (>= 1.7.0, < 2.0)
+      rubocop-ast (>= 0.4.0)
     ruby-openai (4.0.0)
       faraday (>= 1)
       faraday-multipart (>= 1)
+    ruby-progressbar (1.13.0)
     ruby2_keywords (0.0.5)
+    standard (1.28.2)
+      language_server-protocol (~> 3.17.0.2)
+      lint_roller (~> 1.0)
+      rubocop (~> 1.50.2)
+      standard-custom (~> 1.0.0)
+      standard-performance (~> 1.0.1)
+    standard-custom (1.0.0)
+      lint_roller (~> 1.0)
+    standard-performance (1.0.1)
+      lint_roller (~> 1.0)
+      rubocop-performance (~> 1.16.0)
+    standardrb (1.0.1)
+      standard
     thor (1.2.1)
     treetop (1.6.12)
       polyglot (~> 0.3)
     tzinfo (2.0.6)
       concurrent-ruby (~> 1.0)
+    unicode-display_width (2.4.2)
     weaviate-ruby (0.8.1)
       faraday (~> 1)
       faraday_middleware (~> 1)
@@ -205,15 +240,27 @@ GEM
     zeitwerk (2.6.8)
 
 PLATFORMS
+  arm64-darwin-22
  x86_64-darwin-19
  x86_64-linux
 
 DEPENDENCIES
+  cohere-ruby (~> 0.9.3)
   dotenv-rails (~> 2.7.6)
+  eqn (~> 1.6.5)
+  google_search_results (~> 2.0.0)
+  hugging-face (~> 0.3.2)
   langchainrb!
+  milvus (~> 0.9.0)
+  pinecone (~> 0.1.6)
   pry-byebug (~> 3.10.0)
+  qdrant-ruby (~> 0.9.0)
   rake (~> 13.0)
   rspec (~> 3.0)
+  ruby-openai (~> 4.0.0)
+  standardrb
+  weaviate-ruby (~> 0.8.0)
+  wikipedia-client (~> 1.17.0)
 
 BUNDLED WITH
   2.4.0
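The lockfile shows the same move from the other side: every integration gem left the gem's own runtime dependencies (the `PATH` specs) and reappeared under `DEPENDENCIES`, i.e. the repository's Gemfile. The gemspec itself is not part of this diff, but the implied change is roughly the following sketch:

```ruby
# langchainrb.gemspec (hypothetical sketch; the gemspec is not shown in this diff)
Gem::Specification.new do |spec|
  spec.name = "langchainrb"
  # Before: hard runtime dependencies pulled in for every consumer, e.g.
  #   spec.add_dependency "ruby-openai", "~> 4.0.0"
  # After: no runtime provider dependencies; each app adds only the gems it uses
end
```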
data/README.md CHANGED
@@ -1,5 +1,5 @@
 🦜️🔗 LangChain.rb
----
+---
 ⚡ Building applications with LLMs through composability ⚡
 
 👨‍💻👩‍💻 CURRENTLY SEEKING PEOPLE TO FORM THE CORE GROUP OF MAINTAINERS WITH
@@ -39,6 +39,8 @@ require "langchain"
 
 Choose the LLM provider you'll be using (OpenAI or Cohere) and retrieve the API key.
 
+Add `gem "weaviate-ruby", "~> 0.8.0"` to your Gemfile.
+
 Pick the vector search database you'll be using and instantiate the client:
 ```ruby
 client = Vectorsearch::Weaviate.new(
@@ -49,9 +51,9 @@ client = Vectorsearch::Weaviate.new(
 )
 
 # You can instantiate any other supported vector search database:
-client = Vectorsearch::Milvus.new(...)
-client = Vectorsearch::Qdrant.new(...)
-client = Vectorsearch::Pinecone.new(...)
+client = Vectorsearch::Milvus.new(...) # `gem "milvus", "~> 0.9.0"`
+client = Vectorsearch::Qdrant.new(...) # `gem "qdrant-ruby", "~> 0.9.0"`
+client = Vectorsearch::Pinecone.new(...) # `gem "pinecone", "~> 0.1.6"`
 ```
 
 ```ruby
@@ -92,6 +94,8 @@ client.ask(
 
 ### Using Standalone LLMs 🗣️
 
+Add `gem "ruby-openai", "~> 4.0.0"` to your Gemfile.
+
 #### OpenAI
 ```ruby
 openai = LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
@@ -104,6 +108,8 @@ openai.complete(prompt: "What is the meaning of life?")
 ```
 
 #### Cohere
+Add `gem "cohere-ruby", "~> 0.9.3"` to your Gemfile.
+
 ```ruby
 cohere = LLM::Cohere.new(api_key: ENV["COHERE_API_KEY"])
 ```
@@ -114,6 +120,9 @@ cohere.embed(text: "foo bar")
 cohere.complete(prompt: "What is the meaning of life?")
 ```
 
+#### HuggingFace
+Add `gem "hugging-face", "~> 0.3.2"` to your Gemfile.
+
 ### Using Prompts 📋
 
 #### Prompt Templates
@@ -204,6 +213,8 @@ Agents are semi-autonomous bots that can respond to user questions and use avail
 
 #### Chain-of-Thought Agent
 
+Add `gem "ruby-openai"`, `gem "eqn"`, and `gem "google_search_results"` to your Gemfile.
+
 ```ruby
 agent = Agent::ChainOfThoughtAgent.new(llm: :openai, llm_api_key: ENV["OPENAI_API_KEY"], tools: ['search', 'calculator'])
 
@@ -211,7 +222,7 @@ agent.tools
 # => ["search", "calculator"]
 ```
 ```ruby
-agent.run(question: "How many full soccer fields would be needed to cover the distance between NYC and DC in a straight line?", logging: true)
+agent.run(question: "How many full soccer fields would be needed to cover the distance between NYC and DC in a straight line?")
 #=> "Approximately 2,945 soccer fields would be needed to cover the distance between NYC and DC in a straight line."
 ```
@@ -228,6 +239,16 @@ agent.run(question: "How many full soccer fields would be needed to cover the di
 | "search" | A wrapper around Google Search | `ENV["SERPAPI_API_KEY"]` (https://serpapi.com/manage-api-key)
 | "wikipedia" | Calls Wikipedia API to retrieve the summary | |
 
+
+## Logging
+
+LangChain.rb uses standard logging mechanisms and defaults to the `:warn` level. Most messages are at the info level, but we will add debug or warn statements as needed.
+To show all log messages:
+
+```ruby
+Langchain.logger.level = :info
+```
+
 ## Development
 
 After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake spec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
data/Rakefile CHANGED
@@ -2,7 +2,12 @@
 
 require "bundler/gem_tasks"
 require "rspec/core/rake_task"
+require "standard/rake"
 
 RSpec::Core::RakeTask.new(:spec)
 
 task default: :spec
+
+Rake::Task["spec"].enhance do
+  Rake::Task["standard:fix"].invoke
+end
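`Rake::Task#enhance` appends an action that runs after the task's own actions, so `rake spec` now lints after the suite passes. This is plain Rake, not a langchainrb helper; a self-contained demonstration of the mechanism:

```ruby
require "rake"

task :spec do
  puts "specs ran"
end

# Analogous to Rake::Task["standard:fix"].invoke in the Rakefile above
Rake::Task["spec"].enhance do
  puts "then the linter runs"
end

Rake::Task["spec"].invoke # => "specs ran" then "then the linter runs"
```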
@@ -9,10 +9,10 @@ prompt = Prompt::FewShotPromptTemplate.new(
     template: "Input: {input}\nOutput: {output}"
   ),
   examples: [
-    { "input": "happy", "output": "sad" },
-    { "input": "tall", "output": "short" }
+    {input: "happy", output: "sad"},
+    {input: "tall", output: "short"}
   ],
-  input_variables: ["adjective"]
+  input_variables: ["adjective"]
 )
 
 prompt.format(adjective: "good")
@@ -1,5 +1,8 @@
 require "langchain"
 
+# gem install pinecone
+# or add `gem "pinecone"` to your Gemfile
+
 # Instantiate the Pinecone client
 pinecone = Vectorsearch::Pinecone.new(
   environment: ENV["PINECONE_ENVIRONMENT"],
@@ -35,9 +38,9 @@ pinecone.ask(
 )
 
 # Generate an embedding and search by it
-openai = LLM::OpenAI.new(api_key: ENV['OPENAI_API_KEY'])
+openai = LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
 embedding = openai.embed(text: "veggie")
 
 pinecone.similarity_search_by_vector(
   embedding: embedding
-)
+)
@@ -1,5 +1,8 @@
 require "langchain"
 
+# gem install qdrant-ruby
+# or add `gem "qdrant-ruby"` to your Gemfile
+
 # Instantiate the Qdrant client
 qdrant = Vectorsearch::Qdrant.new(
   url: ENV["QDRANT_URL"],
@@ -9,7 +12,6 @@ qdrant = Vectorsearch::Qdrant.new(
   llm_api_key: ENV["COHERE_API_KEY"]
 )
 
-
 # Create the default schema.
 qdrant.create_default_schema
 
@@ -33,4 +35,4 @@ qdrant.similarity_search(
 # Interact with your index through Q&A
 qdrant.ask(
   question: "What is the best recipe for chicken?"
-)
+)
@@ -1,5 +1,8 @@
 require "langchain"
 
+# gem install weaviate-ruby
+# or add `gem "weaviate-ruby"` to your Gemfile
+
 # Instantiate the Weaviate client
 weaviate = Vectorsearch::Weaviate.new(
   url: ENV["WEAVIATE_URL"],
@@ -27,4 +30,4 @@ weaviate.add_texts(
 weaviate.similarity_search(
   query: "chicken",
   k: 1
-)
+)
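Each example above now states its optional gem up front. Pulled together, an app that talks to one store needs only that store's client in its Gemfile; a sketch, assuming Qdrant is the chosen backend:

```ruby
# Gemfile (sketch)
source "https://rubygems.org"

gem "langchainrb", "~> 0.3.4"
gem "qdrant-ruby", "~> 0.9.0" # only the vector store you instantiate must be present
gem "ruby-openai", "~> 4.0.0" # plus the LLM client you pass to it
```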
@@ -5,7 +5,7 @@ module Agent
     attr_reader :llm, :llm_api_key, :llm_client, :tools
 
     # Initializes the Agent
-    #
+    #
     # @param llm [Symbol] The LLM to use
     # @param llm_api_key [String] The API key for the LLM
     # @param tools [Array] The tools to use
@@ -22,7 +22,7 @@ module Agent
     end
 
     # Validate tools when they're re-assigned
-    #
+    #
     # @param value [Array] The tools to use
     # @return [Array] The tools that will be used
     def tools=(value)
@@ -31,11 +31,10 @@ module Agent
     end
 
     # Run the Agent!
-    #
+    #
     # @param question [String] The question to ask
-    # @param logging [Boolean] Whether or not to log the Agent's actions
     # @return [String] The answer to the question
-    def run(question:, logging: false)
+    def run(question:)
       question = question.strip
       prompt = create_prompt(
         question: question,
@@ -43,7 +42,7 @@ module Agent
       )
 
       loop do
-        puts("Agent: Passing the prompt to the #{llm} LLM") if logging
+        Langchain.logger.info("Agent: Passing the prompt to the #{llm} LLM")
         response = llm_client.generate_completion(
           prompt: prompt,
           stop_sequences: ["Observation:"],
@@ -51,16 +50,16 @@ module Agent
         )
 
         # Append the response to the prompt
-        prompt += response;
-
+        prompt += response
+
         # Find the requested action in the "Action: search" format
         action = response.match(/Action: (.*)/)&.send(:[], -1)
-
+
         if action
           # Find the input to the action in the "Action Input: [action_input]" format
           action_input = response.match(/Action Input: "?(.*)"?/)&.send(:[], -1)
 
-          puts("Agent: Using the \"#{action}\" Tool with \"#{action_input}\"") if logging
+          Langchain.logger.info("Agent: Using the \"#{action}\" Tool with \"#{action_input}\"")
 
           # Retrieve the Tool::[ToolName] class and call `execute` with action_input as the input
           result = Tool
@@ -68,10 +67,10 @@ module Agent
             .execute(input: action_input)
 
           # Append the Observation to the prompt
-          if prompt.end_with?("Observation:")
-            prompt += " #{result}\nThought:"
+          prompt += if prompt.end_with?("Observation:")
+            " #{result}\nThought:"
           else
-            prompt += "\nObservation: #{result}\nThought:"
+            "\nObservation: #{result}\nThought:"
           end
         else
           # Return the final answer
@@ -92,7 +91,7 @@ module Agent
         question: question,
         tool_names: "[#{tools.join(", ")}]",
         tools: tools.map do |tool|
-          "#{tool}: #{Tool.const_get(Tool::Base::TOOLS[tool]).const_get("DESCRIPTION")}"
+          "#{tool}: #{Tool.const_get(Tool::Base::TOOLS[tool]).const_get(:DESCRIPTION)}"
         end.join("\n")
       )
     end
data/lib/dependency_helper.rb ADDED
@@ -0,0 +1,18 @@
+# frozen_string_literal: true
+
+def depends_on(gem_name)
+  gem(gem_name) # require the gem
+
+  return(true) unless defined?(Bundler) # If we're in a non-bundler environment, we're no longer able to determine if we'll meet requirements
+
+  gem_version = Gem.loaded_specs[gem_name].version
+  gem_requirement = Bundler.load.dependencies.find { |g| g.name == gem_name }.requirement
+
+  if !gem_requirement.satisfied_by?(gem_version)
+    raise "The #{gem_name} gem is installed, but version #{gem_requirement} is required. You have #{gem_version}."
+  end
+
+  true
+rescue LoadError
+  raise LoadError, "Could not load #{gem_name}. Please ensure that the #{gem_name} gem is installed."
+end
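The helper is then called at the top of each client constructor, as the `cohere.rb` and `openai.rb` hunks below show; the shape is always the same:

```ruby
def initialize(api_key:)
  depends_on "cohere-ruby" # raise early if the gem is absent or the wrong version
  require "cohere"         # load it only now, at instantiation time

  @client = ::Cohere::Client.new(api_key: api_key)
end
```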
data/lib/langchain.rb CHANGED
@@ -1,6 +1,8 @@
 # frozen_string_literal: true
 
 require_relative "./version"
+require_relative "./dependency_helper"
+require_relative "./logging"
 
 module Agent
   autoload :Base, "agent/base"
@@ -18,6 +20,7 @@ end
 module LLM
   autoload :Base, "llm/base"
   autoload :Cohere, "llm/cohere"
+  autoload :HuggingFace, "llm/hugging_face"
   autoload :OpenAI, "llm/openai"
 end
 
data/lib/llm/base.rb CHANGED
@@ -12,16 +12,16 @@ module LLM
     }.freeze
 
     def default_dimension
-      self.class.const_get("DEFAULTS").dig(:dimension)
+      self.class.const_get(:DEFAULTS).dig(:dimension)
     end
 
     # Ensure that the LLM value passed in is supported
     # @param llm [Symbol] The LLM to use
     def self.validate_llm!(llm:)
       # TODO: Fix so this works when `llm` value is a string instead of a symbol
-      unless LLM::Base::LLMS.keys.include?(llm)
+      unless LLM::Base::LLMS.key?(llm)
         raise ArgumentError, "LLM must be one of #{LLM::Base::LLMS.keys}"
       end
     end
   end
-end
+end
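A quick illustration of the guard this hunk touches; behavior is inferred from the code above, and as the TODO notes, only symbols pass for now:

```ruby
LLM::Base.validate_llm!(llm: :openai)  # passes silently
LLM::Base.validate_llm!(llm: "openai") # raises ArgumentError: string keys don't match yet
LLM::Base.validate_llm!(llm: :llama)   # raises ArgumentError listing the supported LLMs
```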
data/lib/llm/cohere.rb CHANGED
@@ -1,10 +1,7 @@
 # frozen_string_literal: true
 
-require "cohere"
-
 module LLM
   class Cohere < Base
-
     DEFAULTS = {
       temperature: 0.0,
       completion_model_name: "base",
@@ -13,6 +10,9 @@ module LLM
     }.freeze
 
     def initialize(api_key:)
+      depends_on "cohere-ruby"
+      require "cohere"
+
       @client = ::Cohere::Client.new(api_key: api_key)
     end
 
@@ -22,7 +22,7 @@ module LLM
     def embed(text:)
       response = client.embed(
         texts: [text],
-        model: DEFAULTS[:embeddings_model_name],
+        model: DEFAULTS[:embeddings_model_name]
       )
       response.dig("embeddings").first
     end
@@ -50,4 +50,4 @@ module LLM
     alias_method :generate_completion, :complete
     alias_method :generate_embedding, :embed
   end
-end
+end
data/lib/llm/hugging_face.rb ADDED
@@ -0,0 +1,32 @@
+# frozen_string_literal: true
+
+module LLM
+  class HuggingFace < Base
+    # The gem does not currently accept other models:
+    # https://github.com/alchaplinsky/hugging-face/blob/main/lib/hugging_face/inference_api.rb#L32-L34
+    DEFAULTS = {
+      embeddings_model_name: "sentence-transformers/all-MiniLM-L6-v2"
+    }.freeze
+
+    #
+    # Initialize the HuggingFace LLM
+    # @param api_key [String] The API key to use
+    #
+    def initialize(api_key:)
+      depends_on "hugging-face"
+      require "hugging_face"
+
+      @client = ::HuggingFace::InferenceApi.new(api_token: api_key)
+    end
+
+    # Generate an embedding for a given text
+    # @param text [String] The text to embed
+    # @return [Array] The embedding
+    def embed(text:)
+      response = client.embedding(
+        input: text,
+        model: DEFAULTS[:embeddings_model_name]
+      )
+    end
+  end
+end
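Putting the new class to work mirrors the other providers; a minimal sketch, noting that the env var name is an assumption and only embeddings are implemented at this point:

```ruby
require "langchain"

hf = LLM::HuggingFace.new(api_key: ENV["HUGGING_FACE_API_TOKEN"])
hf.embed(text: "foo bar") # always uses sentence-transformers/all-MiniLM-L6-v2
```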
data/lib/llm/openai.rb CHANGED
@@ -1,10 +1,7 @@
 # frozen_string_literal: true
 
-require "openai"
-
 module LLM
   class OpenAI < Base
-
     DEFAULTS = {
       temperature: 0.0,
       completion_model_name: "text-davinci-003",
@@ -13,6 +10,9 @@ module LLM
     }.freeze
 
     def initialize(api_key:)
+      depends_on "ruby-openai"
+      require "openai"
+
       # TODO: Add support to pass `organization_id:`
       @client = ::OpenAI::Client.new(access_token: api_key)
     end
@@ -53,4 +53,4 @@ module LLM
     alias_method :generate_completion, :complete
     alias_method :generate_embedding, :embed
   end
-end
+end
data/lib/logging.rb ADDED
@@ -0,0 +1,13 @@
+# frozen_string_literal: true
+
+require "logger"
+
+module Langchain
+  def self.logger
+    @@logger ||= Logger.new($stdout, level: :warn, formatter: ->(severity, datetime, progname, msg) { "[LangChain.rb] #{msg}\n" })
+  end
+
+  def self.logger=(instance)
+    @@logger = instance
+  end
+end
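Given the defaults above, the gem's info-level progress messages are filtered until the level is lowered, and the whole logger can be swapped for an application's own via the writer. A small demonstration (the `Rails.logger` line is just an example target):

```ruby
Langchain.logger.info("step") # silent: the default level is :warn
Langchain.logger.level = :info
Langchain.logger.info("step") # prints "[LangChain.rb] step"

Langchain.logger = Rails.logger # or any Logger-compatible object
```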
data/lib/prompt/base.rb CHANGED
@@ -1,6 +1,7 @@
 # frozen_string_literal: true
 
-require 'strscan'
+require "strscan"
+require "json"
 
 module Prompt
   class Base
@@ -52,14 +53,12 @@ module Prompt
       FileUtils.mkdir_p(directory_path) unless directory_path.directory?
 
       if save_path.extname == ".json"
-        File.open(file_path, "w") { |f| f.write(to_h.to_json) }
+        File.write(file_path, to_h.to_json)
       else
         raise ArgumentError, "#{file_path} must be json"
       end
     end
 
-    private
-
     #
     # Extracts variables from a template string.
     #
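From the caller's side, the `save` path above is exercised roughly as follows; the `Prompt::PromptTemplate` class name and the `file_path:` keyword are not shown in this diff, so treat the sketch as illustrative:

```ruby
prompt = Prompt::PromptTemplate.new(
  template: "Tell me a {adjective} joke.",
  input_variables: ["adjective"]
)
prompt.save(file_path: "prompts/joke.json") # writes to_h as JSON; non-.json paths raise ArgumentError
```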
@@ -1,7 +1,7 @@
 # frozen_string_literal: true
 
-require 'strscan'
-require 'pathname'
+require "strscan"
+require "pathname"
 
 module Prompt
   TYPE_TO_LOADER = {
@@ -70,7 +70,7 @@ module Prompt
   def load_from_config(config)
     # If `_type` key is not present in the configuration hash, add it with a default value of `prompt`
     unless config.key?("_type")
-      puts "[WARN] No `_type` key found, defaulting to `prompt`"
+      Langchain.logger.warn "No `_type` key found, defaulting to `prompt`"
       config["_type"] = "prompt"
     end
 