langchainrb 0.6.13 → 0.6.15

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 9a8dc8c16a235328e6122725804fc8dface37910d2014ecf44410631d3ec63cb
-  data.tar.gz: 5d69e6b1dda419d2834f9041a18eae48b2c63303cc901515cd883153635f0742
+  metadata.gz: 804ffbb08baabf8d2b0372e6893ca31a8c0933425dcabc78b2b48381b045d0c9
+  data.tar.gz: a53ed993838ab79c343618b445533c285f35e186c3a1f4412f40f7da12b9911b
 SHA512:
-  metadata.gz: 0c176e717986c0bb0b74761858bf0a6aac8e84fff179791fd67b68441760d5a419ae767bb678bab8a783bfaf6a17f53b7177b24d96fd8f1b076fd4f091c5443e
-  data.tar.gz: a0457aaf411f8932ada4bb3683fef81396334186047a5bb26040c0c03634443662806ad3d2d37ec81f4121f519e387f77688e395cc38882b7f8ab85c0795fba6
+  metadata.gz: d4aa19658c6c6ffdd5268c6ab83abe3ba17c3bb84b3880a6347bb67fa5c1b4bf0e9304b22c477b27401394450b692d0ee545f5745c6e3a2ec2e5e2ba50779584
+  data.tar.gz: b1c918b8d28e86b11cde99e1b976cbffcca36dbc8ac354e08ce72d9056cc5eafd6ddb601f92edfcd28907687c2247c417477173746405cd1ca2b2ec0fc51df83
data/CHANGELOG.md CHANGED
@@ -1,5 +1,16 @@
 ## [Unreleased]
 
+## [0.6.15] - 2023-09-22
+- Bump weaviate-ruby gem version
+- Ollama support
+
+## [0.6.14] - 2023-09-11
+- Add `find` method to `Langchain::Vectorsearch::Qdrant`
+- Enhance Google search output
+- Raise ApiError when OpenAI returns an error
+- Update OpenAI `complete` method to use chat completion api
+- Deprecate legacy completion models. See https://platform.openai.com/docs/deprecations/2023-07-06-gpt-and-embeddings
+
 ## [0.6.13] - 2023-08-23
 - Add `k:` parameter to all `ask()` vector search methods
 - Bump Faraday to 2.x
data/README.md CHANGED
@@ -210,6 +210,18 @@ anthropic = Langchain::LLM::Anthropic.new(api_key: ENV["ANTHROPIC_API_KEY"])
 anthropic.complete(prompt: "What is the meaning of life?")
 ```
 
+#### Ollama
+```ruby
+ollama = Langchain::LLM::Ollama.new(url: ENV["OLLAMA_URL"])
+```
+
+```ruby
+ollama.complete(prompt: "What is the meaning of life?")
+```
+```ruby
+ollama.embed(text: "Hello world!")
+```
+
 ### Using Prompts 📋
 
 #### Prompt Templates
@@ -83,9 +83,9 @@ module Langchain::Agent
         else
           "\nObservation: #{result}\nThought:"
         end
-      else
+      elsif response.include?("Final Answer:")
         # Return the final answer
-        final_response = response.match(/Final Answer: (.*)/)&.send(:[], -1)
+        final_response = response.split("Final Answer:")[-1]
         break
       end
     end
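
The `split` change above matters for multi-line answers: the old regex stopped at the first newline, while `split` keeps everything after the marker. A quick standalone illustration (not part of the diff):

```ruby
# Illustrative comparison of the old and new extraction logic.
response = "Thought: I now know the answer.\nFinal Answer: 42\nIt follows from the premise."

old_result = response.match(/Final Answer: (.*)/)&.send(:[], -1)
new_result = response.split("Final Answer:")[-1]

old_result # => "42" -- the regex stops at the first newline
new_result # => " 42\nIt follows from the premise." -- keeps the full tail
```

The `elsif` guard also means a response containing neither an action nor a final answer no longer falls through into the answer-extraction branch.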
@@ -2,6 +2,8 @@
 
 module Langchain
   module DependencyHelper
+    class LoadError < ::LoadError; end
+
     class VersionError < ScriptError; end
 
     # This method requires and loads the given gem, and then checks to see if the version of the gem meets the requirements listed in `langchain.gemspec`
@@ -12,7 +14,7 @@ module Langchain
     # @raise [LoadError] If the gem is not installed
     # @raise [VersionError] If the gem is installed, but the version does not meet the requirements
     #
-    def depends_on(gem_name)
+    def depends_on(gem_name, req: true)
       gem(gem_name) # require the gem
 
       return(true) unless defined?(Bundler) # If we're in a non-bundler environment, we're no longer able to determine if we'll meet requirements
@@ -26,8 +28,13 @@ module Langchain
         raise VersionError, "The #{gem_name} gem is installed, but version #{gem_requirement} is required. You have #{gem_version}."
       end
 
+      lib_name = gem_name if req == true
+      lib_name = req if req.is_a?(String)
+
+      require(lib_name) if lib_name
+
       true
-    rescue LoadError
+    rescue ::LoadError
      raise LoadError, "Could not load #{gem_name}. Please ensure that the #{gem_name} gem is installed."
     end
   end
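
In short, `depends_on` now performs the `require` itself: `req: true` (the default) requires a library named after the gem, and a String `req:` handles gems whose require path differs from the gem name. A minimal sketch of the call patterns, drawn from the hunks below (the `req: false` case is inferred from the guard, not shown in this diff):

```ruby
depends_on "anthropic"                   # gem and require path match; requires "anthropic"
depends_on "cohere-ruby", req: "cohere"  # gem name differs from the require path
depends_on "some-gem", req: false        # hypothetical: check the version only, skip the require
```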
@@ -20,7 +20,6 @@ module Langchain::LLM
 
     def initialize(api_key:, default_options: {})
       depends_on "ai21"
-      require "ai21"
 
       @client = ::AI21::Client.new(api_key)
       @defaults = DEFAULTS.merge(default_options)
@@ -22,7 +22,6 @@ module Langchain::LLM
 
     def initialize(api_key:, llm_options: {}, default_options: {})
       depends_on "anthropic"
-      require "anthropic"
 
       @client = ::Anthropic::Client.new(access_token: api_key, **llm_options)
       @defaults = DEFAULTS.merge(default_options)
@@ -20,8 +20,7 @@ module Langchain::LLM
     }.freeze
 
     def initialize(api_key:, default_options: {})
-      depends_on "cohere-ruby"
-      require "cohere"
+      depends_on "cohere-ruby", req: "cohere"
 
       @client = ::Cohere::Client.new(api_key: api_key)
       @defaults = DEFAULTS.merge(default_options)
@@ -25,7 +25,6 @@ module Langchain::LLM
 
     def initialize(api_key:, default_options: {})
       depends_on "google_palm_api"
-      require "google_palm_api"
 
       @client = ::GooglePalmApi::Client.new(api_key: api_key)
       @defaults = DEFAULTS.merge(default_options)
@@ -25,8 +25,7 @@ module Langchain::LLM
     # @param api_key [String] The API key to use
     #
     def initialize(api_key:)
-      depends_on "hugging-face"
-      require "hugging_face"
+      depends_on "hugging-face", req: "hugging_face"
 
       @client = ::HuggingFace::InferenceApi.new(api_token: api_key)
     end
@@ -24,7 +24,6 @@ module Langchain::LLM
     # @param seed [Integer] The seed to use
     def initialize(model_path:, n_gpu_layers: 1, n_ctx: 2048, n_threads: 1, seed: -1)
       depends_on "llama_cpp"
-      require "llama_cpp"
 
       @model_path = model_path
       @n_gpu_layers = n_gpu_layers
@@ -0,0 +1,79 @@
+# frozen_string_literal: true
+
+module Langchain::LLM
+  # Interface to Ollama API.
+  # Available models: https://ollama.ai/library
+  #
+  # Usage:
+  #     ollama = Langchain::LLM::Ollama.new(url: ENV["OLLAMA_URL"])
+  #
+  class Ollama < Base
+    attr_reader :url
+
+    DEFAULTS = {
+      temperature: 0.0,
+      completion_model_name: "llama2",
+      embeddings_model_name: "llama2"
+    }.freeze
+
+    # Initialize the Ollama client
+    # @param url [String] The URL of the Ollama instance
+    def initialize(url:)
+      @url = url
+    end
+
+    # Generate the completion for a given prompt
+    # @param prompt [String] The prompt to complete
+    # @param model [String] The model to use
+    # @param options [Hash] The options to use (https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values)
+    # @return [String] The completed prompt
+    def complete(prompt:, model: nil, **options)
+      response = +""
+
+      client.post("api/generate") do |req|
+        req.body = {}
+        req.body["prompt"] = prompt
+        req.body["model"] = model || DEFAULTS[:completion_model_name]
+
+        req.body["options"] = options if options.any?
+
+        # TODO: Implement streaming support when a &block is passed in
+        req.options.on_data = proc do |chunk, size|
+          json_chunk = JSON.parse(chunk)
+
+          unless json_chunk.dig("done")
+            response.to_s << JSON.parse(chunk).dig("response")
+          end
+        end
+      end
+
+      response
+    end
+
+    # Generate an embedding for a given text
+    # @param text [String] The text to generate an embedding for
+    # @param model [String] The model to use
+    # @param options [Hash] The options to use
+    def embed(text:, model: nil, **options)
+      response = client.post("api/embeddings") do |req|
+        req.body = {}
+        req.body["prompt"] = text
+        req.body["model"] = model || DEFAULTS[:embeddings_model_name]
+
+        req.body["options"] = options if options.any?
+      end
+
+      response.body.dig("embedding")
+    end
+
+    private
+
+    def client
+      @client ||= Faraday.new(url: url) do |conn|
+        conn.request :json
+        conn.response :json
+        conn.response :raise_error
+      end
+    end
+  end
+end
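
For context on the `on_data` proc above: Ollama's `api/generate` endpoint streams one JSON object per chunk, and the client accumulates the `"response"` fields until a chunk arrives with `"done": true`. A standalone sketch of that accumulation (chunk contents are illustrative, field names taken from the code above):

```ruby
require "json"

# Simulated stream of chunks from Ollama's api/generate endpoint.
chunks = [
  '{"model":"llama2","response":"Hello","done":false}',
  '{"model":"llama2","response":" world","done":false}',
  '{"model":"llama2","done":true}'
]

response = +""
chunks.each do |chunk|
  json_chunk = JSON.parse(chunk)
  response << json_chunk["response"] unless json_chunk["done"]
end

response # => "Hello world"
```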
@@ -12,11 +12,19 @@ module Langchain::LLM
   class OpenAI < Base
     DEFAULTS = {
       temperature: 0.0,
-      completion_model_name: "text-davinci-003",
+      completion_model_name: "gpt-3.5-turbo",
       chat_completion_model_name: "gpt-3.5-turbo",
       embeddings_model_name: "text-embedding-ada-002",
       dimension: 1536
     }.freeze
+
+    LEGACY_COMPLETION_MODELS = %w[
+      ada
+      babbage
+      curie
+      davinci
+    ].freeze
+
     LENGTH_VALIDATOR = Langchain::Utils::TokenLength::OpenAIValidator
     ROLE_MAPPING = {
       "ai" => "assistant",
@@ -26,8 +34,7 @@ module Langchain::LLM
     attr_accessor :functions
 
     def initialize(api_key:, llm_options: {}, default_options: {})
-      depends_on "ruby-openai"
-      require "openai"
+      depends_on "ruby-openai", req: "openai"
 
       @client = ::OpenAI::Client.new(access_token: api_key, **llm_options)
       @defaults = DEFAULTS.merge(default_options)
@@ -45,7 +52,10 @@ module Langchain::LLM
 
       validate_max_tokens(text, parameters[:model])
 
-      response = client.embeddings(parameters: parameters.merge(params))
+      response = with_api_error_handling do
+        client.embeddings(parameters: parameters.merge(params))
+      end
+
       response.dig("data").first.dig("embedding")
     end
 
@@ -59,11 +69,16 @@ module Langchain::LLM
     def complete(prompt:, **params)
       parameters = compose_parameters @defaults[:completion_model_name], params
 
-      parameters[:prompt] = prompt
-      parameters[:max_tokens] = validate_max_tokens(prompt, parameters[:model])
+      return legacy_complete(prompt, parameters) if is_legacy_model?(parameters[:model])
 
-      response = client.completions(parameters: parameters)
-      response.dig("choices", 0, "text")
+      parameters[:messages] = compose_chat_messages(prompt: prompt)
+      parameters[:max_tokens] = validate_max_tokens(parameters[:messages], parameters[:model])
+
+      response = with_api_error_handling do
+        client.chat(parameters: parameters)
+      end
+
+      response.dig("choices", 0, "message", "content")
     end
 
     #
@@ -131,8 +146,10 @@ module Langchain::LLM
         end
       end
 
-      response = client.chat(parameters: parameters)
-      raise Langchain::LLM::ApiError.new "Chat completion failed: #{response.dig("error", "message")}" if !response.empty? && response.dig("error")
+      response = with_api_error_handling do
+        client.chat(parameters: parameters)
+      end
+
       unless streaming
         message = response.dig("choices", 0, "message")
         content = message["content"]
@@ -158,6 +175,22 @@ module Langchain::LLM
 
     private
 
+    def is_legacy_model?(model)
+      LEGACY_COMPLETION_MODELS.any? { |legacy_model| model.include?(legacy_model) }
+    end
+
+    def legacy_complete(prompt, parameters)
+      Langchain.logger.warn "DEPRECATION WARNING: The model #{parameters[:model]} is deprecated. Please use gpt-3.5-turbo instead. Details: https://platform.openai.com/docs/deprecations/2023-07-06-gpt-and-embeddings"
+
+      parameters[:prompt] = prompt
+      parameters[:max_tokens] = validate_max_tokens(prompt, parameters[:model])
+
+      response = with_api_error_handling do
+        client.completions(parameters: parameters)
+      end
+      response.dig("choices", 0, "text")
+    end
+
     def compose_parameters(model, params)
       default_params = {model: model, temperature: @defaults[:temperature]}
 
@@ -166,7 +199,7 @@ module Langchain::LLM
       default_params.merge(params)
     end
 
-    def compose_chat_messages(prompt:, messages:, context:, examples:)
+    def compose_chat_messages(prompt:, messages: [], context: "", examples: [])
       history = []
 
       history.concat transform_messages(examples) unless examples.empty?
@@ -198,6 +231,13 @@ module Langchain::LLM
       end
     end
 
+    def with_api_error_handling
+      response = yield
+      raise Langchain::LLM::ApiError.new "OpenAI API error: #{response.dig("error", "message")}" if response&.dig("error")
+
+      response
+    end
+
     def validate_max_tokens(messages, model)
       LENGTH_VALIDATOR.validate_max_tokens!(messages, model)
     end
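
Net effect of the OpenAI changes: `complete` is now a thin wrapper over the chat completion API, and only model names matching `LEGACY_COMPLETION_MODELS` fall back to the old completions endpoint, with a deprecation warning. A hedged usage sketch (API key and prompts are illustrative):

```ruby
openai = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])

# Default model ("gpt-3.5-turbo"): routed through client.chat via compose_chat_messages.
openai.complete(prompt: "What is the meaning of life?")

# Name contains "davinci": logs the deprecation warning, then uses client.completions.
openai.complete(prompt: "What is the meaning of life?", model: "text-davinci-003")
```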
@@ -33,8 +33,7 @@ module Langchain::LLM
     # @param api_key [String] The API key to use
     #
     def initialize(api_key:, default_options: {})
-      depends_on "replicate-ruby"
-      require "replicate"
+      depends_on "replicate-ruby", req: "replicate"
 
       ::Replicate.configure do |config|
         config.api_token = api_key
@@ -98,8 +98,8 @@ module Langchain
     Dir.glob(File.join(@path, "**/*")).map do |file|
       # Only load and add to result files with supported extensions
       Langchain::Loader.new(file, @options).load(&block)
-    rescue => e
-      UnknownFormatError.new(e)
+    rescue
+      UnknownFormatError nil
     end.flatten.compact
   end
 
@@ -134,7 +134,6 @@ module Langchain
   end
 
   def source_type
-    binding.pry
     url? ? @raw_data.content_type : File.extname(@path)
   end
 
@@ -8,7 +8,6 @@ module Langchain
 
   def initialize(*)
     depends_on "docx"
-    require "docx"
   end
 
   # Parse the document and return the text
@@ -11,7 +11,6 @@ module Langchain
 
   def initialize(*)
     depends_on "nokogiri"
-    require "nokogiri"
   end
 
   # Parse the document and return the text
@@ -8,7 +8,6 @@ module Langchain
 
   def initialize(*)
     depends_on "pdf-reader"
-    require "pdf-reader"
   end
 
   # Parse the document and return the text
@@ -8,7 +8,6 @@ module Langchain
 
   def initialize(*)
     depends_on "roo"
-    require "roo"
   end
 
   # Parse the document and return the text
@@ -25,7 +25,6 @@ module Langchain::Tool
 
   def initialize
     depends_on "eqn"
-    require "eqn"
   end
 
   # Evaluates a pure math expression or if equation contains non-math characters (e.g.: "12F in Celsius") then
@@ -27,7 +27,6 @@ module Langchain::Tool
   #
   def initialize(connection_string:, tables: [], exclude_tables: [])
     depends_on "sequel"
-    require "sequel"
 
     raise StandardError, "connection_string parameter cannot be blank" if connection_string.empty?
 
@@ -3,7 +3,7 @@
 module Langchain::Tool
   class GoogleSearch < Base
     #
-    # Wrapper around Google Serp SPI
+    # Wrapper around SerpApi's Google Search API
     #
     # Gem requirements: gem "google_search_results", "~> 2.0.0"
     #
@@ -15,7 +15,7 @@ module Langchain::Tool
     NAME = "google_search"
 
     description <<~DESC
-      A wrapper around Google Search.
+      A wrapper around SerpApi's Google Search API.
 
       Useful for when you need to answer questions about current events.
       Always one of the first options when you need to find information on internet.
@@ -33,7 +33,7 @@ module Langchain::Tool
     #
     def initialize(api_key:)
       depends_on "google_search_results"
-      require "google_search_results"
+
      @api_key = api_key
     end
 
@@ -56,13 +56,78 @@ module Langchain::Tool
     def execute(input:)
       Langchain.logger.info("Executing \"#{input}\"", for: self.class)
 
-      hash_results = execute_search(input: input)
+      results = execute_search(input: input)
+
+      answer_box = results[:answer_box_list] ? results[:answer_box_list].first : results[:answer_box]
+      if answer_box
+        return answer_box[:result] ||
+            answer_box[:answer] ||
+            answer_box[:snippet] ||
+            answer_box[:snippet_highlighted_words] ||
+            answer_box.reject { |_k, v| v.is_a?(Hash) || v.is_a?(Array) || v.start_with?("http") }
+      elsif (events_results = results[:events_results])
+        return events_results.take(10)
+      elsif (sports_results = results[:sports_results])
+        return sports_results
+      elsif (top_stories = results[:top_stories])
+        return top_stories
+      elsif (news_results = results[:news_results])
+        return news_results
+      elsif (jobs_results = results.dig(:jobs_results, :jobs))
+        return jobs_results
+      elsif (shopping_results = results[:shopping_results]) && shopping_results.first.key?(:title)
+        return shopping_results.take(3)
+      elsif (questions_and_answers = results[:questions_and_answers])
+        return questions_and_answers
+      elsif (popular_destinations = results.dig(:popular_destinations, :destinations))
+        return popular_destinations
+      elsif (top_sights = results.dig(:top_sights, :sights))
+        return top_sights
+      elsif (images_results = results[:images_results]) && images_results.first.key?(:thumbnail)
+        return images_results.map { |h| h[:thumbnail] }.take(10)
+      end
+
+      snippets = []
+      if (knowledge_graph = results[:knowledge_graph])
+        snippets << knowledge_graph[:description] if knowledge_graph[:description]
+
+        title = knowledge_graph[:title] || ""
+        knowledge_graph.each do |k, v|
+          if v.is_a?(String) &&
+              k != :title &&
+              k != :description &&
+              !k.to_s.end_with?("_stick") &&
+              !k.to_s.end_with?("_link") &&
+              !k.to_s.start_with?("http")
+            snippets << "#{title} #{k}: #{v}"
+          end
+        end
+      end
+
+      if (first_organic_result = results.dig(:organic_results, 0))
+        if (snippet = first_organic_result[:snippet])
+          snippets << snippet
+        elsif (snippet_highlighted_words = first_organic_result[:snippet_highlighted_words])
+          snippets << snippet_highlighted_words
+        elsif (rich_snippet = first_organic_result[:rich_snippet])
+          snippets << rich_snippet
+        elsif (rich_snippet_table = first_organic_result[:rich_snippet_table])
+          snippets << rich_snippet_table
+        elsif (link = first_organic_result[:link])
+          snippets << link
+        end
+      end
+
+      if (buying_guide = results[:buying_guide])
+        snippets << buying_guide
+      end
+
+      if (local_results = results.dig(:local_results, :places))
+        snippets << local_results
+      end
 
-      # TODO: Glance at all of the fields that langchain Python looks through: https://github.com/hwchase17/langchain/blob/v0.0.166/langchain/utilities/serpapi.py#L128-L156
-      # We may need to do the same thing here.
-      hash_results.dig(:answer_box, :answer) ||
-        hash_results.dig(:answer_box, :snippet) ||
-        hash_results.dig(:organic_results, 0, :snippet)
+      return "No good search result found" if snippets.empty?
+      snippets
     end
 
     #
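
`execute` now mirrors the field-by-field extraction that langchain Python performs on SerpApi responses (the removed TODO pointed at exactly that code): answer boxes and specialized result blocks short-circuit with structured data, and only then does it fall back to knowledge-graph and organic-result snippets. A hypothetical invocation (the env var name and queries are assumptions):

```ruby
search = Langchain::Tool::GoogleSearch.new(api_key: ENV["SERPAPI_API_KEY"])

search.execute(input: "how tall is the Eiffel Tower")
# => answer-box value, e.g. "330 m", instead of only an organic-result snippet

search.execute(input: "concerts this weekend")
# => up to 10 entries from :events_results, if SerpApi returns that section
```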
@@ -13,9 +13,9 @@ module Langchain::Tool
     DESC
 
     def initialize(timeout: 30)
-      @timeout = timeout
       depends_on "safe_ruby"
-      require "safe_ruby"
+
+      @timeout = timeout
     end
 
     # @param input [String] ruby code expression
@@ -18,8 +18,7 @@ module Langchain::Tool
     DESC
 
     def initialize
-      depends_on "wikipedia-client"
-      require "wikipedia"
+      depends_on "wikipedia-client", req: "wikipedia"
     end
 
     # Executes Wikipedia API search and returns the answer
@@ -0,0 +1,16 @@
+# frozen_string_literal: true
+
+require "tiktoken_ruby"
+
+module Langchain
+  module Utils
+    module TokenLength
+      #
+      # This class is meant to validate the length of the text passed in to Ollama.
+      # It is used to validate the token length before the API call is made
+      #
+      class OllamaValidator < BaseValidator
+      end
+    end
+  end
+end
@@ -18,7 +18,6 @@ module Langchain::Vectorsearch
   # @param llm [Object] The LLM client to use
   def initialize(url:, index_name:, llm:, api_key: nil)
     depends_on "chroma-db"
-    require "chroma-db"
 
     ::Chroma.connect_host = url
     ::Chroma.logger = Langchain.logger
@@ -23,7 +23,6 @@ module Langchain::Vectorsearch
   #
   def initialize(llm:, path_to_index:)
     depends_on "hnswlib"
-    require "hnswlib"
 
     super(llm: llm)
 
@@ -13,7 +13,6 @@ module Langchain::Vectorsearch
 
   def initialize(url:, index_name:, llm:, api_key: nil)
     depends_on "milvus"
-    require "milvus"
 
     @client = ::Milvus::Client.new(url: url)
     @index_name = index_name
@@ -26,9 +26,7 @@ module Langchain::Vectorsearch
   # @param namespace [String] The namespace to use for the index when inserting/querying
   def initialize(url:, index_name:, llm:, namespace: nil)
     depends_on "sequel"
-    require "sequel"
     depends_on "pgvector"
-    require "pgvector"
 
     @db = Sequel.connect(url)
 
@@ -18,7 +18,6 @@ module Langchain::Vectorsearch
   # @param llm [Object] The LLM client to use
   def initialize(environment:, api_key:, index_name:, llm:)
     depends_on "pinecone"
-    require "pinecone"
 
     ::Pinecone.configure do |config|
       config.api_key = api_key
@@ -31,6 +30,19 @@ module Langchain::Vectorsearch
     super(llm: llm)
   end
 
+  # Find records by ids
+  # @param ids [Array] The ids to find
+  # @param namespace String The namespace to search through
+  # @return [Hash] The response from the server
+  def find(ids: [], namespace: "")
+    raise ArgumentError, "Ids must be provided" if Array(ids).empty?
+
+    client.index(index_name).fetch(
+      ids: ids,
+      namespace: namespace
+    )
+  end
+
   # Add a list of texts to the index
   # @param texts [Array] The list of texts to add
   # @param ids [Array] The list of IDs to add
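
A hedged sketch of the new Pinecone `find` (index name, ids, and the `llm` client are illustrative):

```ruby
pinecone = Langchain::Vectorsearch::Pinecone.new(
  environment: ENV["PINECONE_ENVIRONMENT"],
  api_key: ENV["PINECONE_API_KEY"],
  index_name: "recipes",  # hypothetical index
  llm: llm                # any configured Langchain::LLM client
)

pinecone.find(ids: ["doc-1", "doc-2"], namespace: "production")
# => raw fetch response from Pinecone; raises ArgumentError when ids is empty
```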
@@ -17,8 +17,7 @@ module Langchain::Vectorsearch
   # @param index_name [String] The name of the index to use
   # @param llm [Object] The LLM client to use
   def initialize(url:, api_key:, index_name:, llm:)
-    depends_on "qdrant-ruby"
-    require "qdrant"
+    depends_on "qdrant-ruby", req: "qdrant"
 
     @client = ::Qdrant::Client.new(
       url: url,
@@ -29,6 +28,18 @@ module Langchain::Vectorsearch
     super(llm: llm)
   end
 
+  # Find records by ids
+  # @param ids [Array] The ids to find
+  # @return [Hash] The response from the server
+  def find(ids: [])
+    client.points.get_all(
+      collection_name: index_name,
+      ids: ids,
+      with_payload: true,
+      with_vector: true
+    )
+  end
+
   # Add a list of texts to the index
   # @param texts [Array] The list of texts to add
   # @return [Hash] The response from the server
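
And the Qdrant counterpart introduced in 0.6.14: it reads the collection name from the instance, so only ids are passed (assuming a `qdrant` client constructed as in the initializer above):

```ruby
qdrant.find(ids: [1, 2, 3])
# => matching points with payloads and vectors (with_payload/with_vector are true)
```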
@@ -17,8 +17,7 @@ module Langchain::Vectorsearch
   # @param index_name [String] The capitalized name of the index to use
   # @param llm [Object] The LLM client to use
   def initialize(url:, api_key:, index_name:, llm:)
-    depends_on "weaviate-ruby"
-    require "weaviate"
+    depends_on "weaviate-ruby", req: "weaviate"
 
     @client = ::Weaviate::Client.new(
       url: url,
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module Langchain
-  VERSION = "0.6.13"
+  VERSION = "0.6.15"
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: langchainrb
 version: !ruby/object:Gem::Version
-  version: 0.6.13
+  version: 0.6.15
 platform: ruby
 authors:
 - Andrei Bondarev
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2023-08-23 00:00:00.000000000 Z
+date: 2023-09-22 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: baran
@@ -16,14 +16,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.1.6
+        version: 0.1.8
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.1.6
+        version: 0.1.8
 - !ruby/object:Gem::Dependency
   name: colorize
   requirement: !ruby/object:Gem::Requirement
@@ -408,14 +408,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.9.3
+        version: 0.9.4
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.9.3
+        version: 0.9.4
 - !ruby/object:Gem::Dependency
   name: roo
   requirement: !ruby/object:Gem::Requirement
@@ -478,14 +478,14 @@ dependencies:
     requirements:
     - - "~>"
      - !ruby/object:Gem::Version
-        version: 0.8.6
+        version: 0.8.7
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.8.6
+        version: 0.8.7
 - !ruby/object:Gem::Dependency
   name: wikipedia-client
   requirement: !ruby/object:Gem::Requirement
@@ -535,6 +535,7 @@ files:
 - lib/langchain/llm/google_palm.rb
 - lib/langchain/llm/hugging_face.rb
 - lib/langchain/llm/llama_cpp.rb
+- lib/langchain/llm/ollama.rb
 - lib/langchain/llm/openai.rb
 - lib/langchain/llm/prompts/summarize_template.yaml
 - lib/langchain/llm/replicate.rb
@@ -571,6 +572,7 @@ files:
 - lib/langchain/utils/token_length/base_validator.rb
 - lib/langchain/utils/token_length/cohere_validator.rb
 - lib/langchain/utils/token_length/google_palm_validator.rb
+- lib/langchain/utils/token_length/ollama_validator.rb
 - lib/langchain/utils/token_length/openai_validator.rb
 - lib/langchain/utils/token_length/token_limit_exceeded.rb
 - lib/langchain/vectorsearch/base.rb