langchainrb 0.6.5 → 0.6.7

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 3404535e036c3efe68fd12706d2ebb269caed87b562fc38434122b1be01a356d
-  data.tar.gz: e3be77b32cf754235e8895fb1af60edca54cb5acb84278bfa2e39b6ed7c2abbe
+  metadata.gz: 5e8dc16375bc94045a728b37d6d388a8670699c35af4213bc4d16163fbb0047c
+  data.tar.gz: c9695ac8ce5d6bf351afd3148d281a26733f37a43674fae71c3c81c7530cdc2e
 SHA512:
-  metadata.gz: b3fae04c73176c758c2d2d32c3ac538f3e094eb10f378b9a8befbbdcc62b60e55941a1bfefcb61eac7daca43ef91d0e57306dbc26bd59afbdad6ab4efff2ba89
-  data.tar.gz: 626bb4a226112ee6fe709077a6d49ba91c0483fee657848153e9cff61693183709aede5844237c24cc02c561f59be82ea1fd296fe1c3f4ee4d971494ee4dcd75
+  metadata.gz: 67a532ea9c46961a8636f1332590876f8d5d26e67384789c9a4ab7907c4a2150240a7c9d63ae188d19a269e345ff0dfafcf24ab1cb4a987e4eab5cc2439bbf06
+  data.tar.gz: 333888fdd41e2ce21b369fc0844ca3b181f2db603833bf8cbf8a46e59b8049dcb31a216fb4adcb12fe5a69dc33ec21725131e6e8ce8e7e7633814f5dc7c51c7b
data/CHANGELOG.md CHANGED
@@ -1,5 +1,18 @@
 ## [Unreleased]
 
+## [0.6.7] - 2023-07-19
+- Support for OpenAI functions
+- Streaming vectorsearch ask() responses
+
+## [0.6.6] - 2023-07-13
+- Langchain::Chunker::RecursiveText
+- Fixes
+
+## [0.6.5] - 2023-07-06
+- 🗣️ LLMs
+  - Introducing Llama.cpp support
+- Langchain::OutputParsers::OutputFixingParser to wrap a Langchain::OutputParser and handle invalid responses
+
 ## [0.6.4] - 2023-07-01
 - Fix `Langchain::Vectorsearch::Qdrant#add_texts()`
 - Introduce `ConversationMemory`
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    langchainrb (0.6.5)
+    langchainrb (0.6.7)
       baran (~> 0.1.6)
       colorize (~> 0.8.1)
       json-schema (~> 4.0.0)
@@ -133,7 +133,7 @@ GEM
       faraday (>= 1.0)
       faraday_middleware
       graphql-client
-    graphql (2.0.23)
+    graphql (2.0.24)
     graphql-client (0.18.0)
       activesupport (>= 3.0)
       graphql
@@ -264,7 +264,7 @@ GEM
       rubocop (>= 1.7.0, < 2.0)
       rubocop-ast (>= 0.4.0)
     ruby-next-core (0.15.3)
-    ruby-openai (4.0.0)
+    ruby-openai (4.1.0)
       faraday (>= 1)
       faraday-multipart (>= 1)
     ruby-progressbar (1.13.0)
@@ -299,7 +299,7 @@ GEM
     tzinfo (2.0.6)
       concurrent-ruby (~> 1.0)
     unicode-display_width (2.4.2)
-    weaviate-ruby (0.8.3)
+    weaviate-ruby (0.8.4)
       faraday (~> 1)
       faraday_middleware (~> 1)
       graphlient (~> 0.6.0)
@@ -344,7 +344,7 @@ DEPENDENCIES
   roo (~> 2.10.0)
   rspec (~> 3.0)
   rubocop
-  ruby-openai (~> 4.0.0)
+  ruby-openai (~> 4.1.0)
   safe_ruby (~> 1.0.4)
   sequel (~> 5.68.0)
   standardrb
data/README.md CHANGED
@@ -39,7 +39,7 @@ require "langchain"
 | [Hnswlib](https://github.com/nmslib/hnswlib/) | :white_check_mark: | :white_check_mark: | :white_check_mark: | WIP | WIP |
 | [Milvus](https://milvus.io/) | :white_check_mark: | :white_check_mark: | :white_check_mark: | WIP | WIP |
 | [Pinecone](https://www.pinecone.io/) | :white_check_mark: | :white_check_mark: | :white_check_mark: | WIP | :white_check_mark: |
-| [Pgvector](https://github.com/pgvector/pgvector) | :white_check_mark: | :white_check_mark: | :white_check_mark: | WIP | WIP |
+| [Pgvector](https://github.com/pgvector/pgvector) | :white_check_mark: | :white_check_mark: | :white_check_mark: | WIP | :white_check_mark: |
 | [Qdrant](https://qdrant.tech/) | :white_check_mark: | :white_check_mark: | :white_check_mark: | WIP | :white_check_mark: |
 | [Weaviate](https://weaviate.io/) | :white_check_mark: | :white_check_mark: | :white_check_mark: | WIP | :white_check_mark: |
 
@@ -54,7 +54,7 @@ Pick the vector search database you'll be using and instantiate the client:
 client = Langchain::Vectorsearch::Weaviate.new(
     url: ENV["WEAVIATE_URL"],
     api_key: ENV["WEAVIATE_API_KEY"],
-    index: "",
+    index_name: "",
     llm: Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
 )
 
@@ -145,6 +145,27 @@ openai.embed(text: "foo bar")
 openai.complete(prompt: "What is the meaning of life?")
 ```
 
+##### OpenAI function calls support
+
+With a `Langchain::Conversation`:
+
+```ruby
+chat = Langchain::Conversation.new(llm: openai)
+```
+```ruby
+chat.set_context("You are the climate bot")
+chat.set_functions(functions)
+```
+
+With a vectorsearch client (e.g. Qdrant):
+
+```ruby
+client.llm.functions = functions
+client.llm.complete_response = true
+```
+
+When `complete_response` is `true`, the entire `choices` payload from the OpenAI response is returned instead of just the message content.
+
 #### Cohere
 Add `gem "cohere-ruby", "~> 0.9.3"` to your Gemfile.
 
@@ -427,7 +448,7 @@ agent.run(question: "How many users have a name with length greater than 5 in th
 | "database" | Useful for querying a SQL database | | `gem "sequel", "~> 5.68.0"` |
 | "ruby_code_interpreter" | Interprets Ruby expressions | | `gem "safe_ruby", "~> 1.0.4"` |
 | "google_search" | A wrapper around Google Search | `ENV["SERPAPI_API_KEY"]` (https://serpapi.com/manage-api-key) | `gem "google_search_results", "~> 2.0.0"` |
-| "weather" | Calls Open Weather API to retrieve the current weather | `ENV["OPEN_WEATHER_API_KEY]` (https://home.openweathermap.org/api_keys) | `gem "open-weather-ruby-client", "~> 0.3.0"` |
+| "weather" | Calls Open Weather API to retrieve the current weather | `ENV["OPEN_WEATHER_API_KEY"]` (https://home.openweathermap.org/api_keys) | `gem "open-weather-ruby-client", "~> 0.3.0"` |
 | "wikipedia" | Calls Wikipedia API to retrieve the summary | | `gem "wikipedia-client", "~> 1.17.0"` |
 
 #### Loaders 🚚
data/examples/open_ai_function_calls.rb ADDED
@@ -0,0 +1,41 @@
+require "langchain"
+require "dotenv/load"
+
+functions = [
+  {
+    name: "get_current_weather",
+    description: "Get the current weather in a given location",
+    parameters: {
+      type: :object,
+      properties: {
+        location: {
+          type: :string,
+          description: "The city and state, e.g. San Francisco, CA"
+        },
+        unit: {
+          type: "string",
+          enum: %w[celsius fahrenheit]
+        }
+      },
+      required: ["location"]
+    }
+  }
+]
+
+openai = Langchain::LLM::OpenAI.new(
+  api_key: ENV["OPENAI_API_KEY"],
+  default_options: {
+    chat_completion_model_name: "gpt-3.5-turbo-16k"
+  }
+)
+
+chat = Langchain::Conversation.new(llm: openai)
+
+chat.set_context("You are the climate bot")
+chat.set_functions(functions)
+
+DONE = %w[done end eof exit].freeze
+
+user_message = "what's the weather in NYC?"
+
+puts chat.message(user_message)
data/examples/open_ai_qdrant_function_calls.rb ADDED
@@ -0,0 +1,43 @@
+require "langchain"
+require "dotenv/load"
+
+functions = [
+  {
+    name: "create_rails_controller",
+    description: "gives a command to create a rails controller",
+    parameters: {
+      type: :object,
+      properties: {
+        controller_name: {
+          type: :string,
+          description: "the controller name, e.g. users_controller"
+        },
+        unit: {
+          type: "string",
+          enum: %w[celsius fahrenheit]
+        }
+      },
+      required: ["controller_name"]
+    }
+  }
+]
+
+openai = Langchain::LLM::OpenAI.new(
+  api_key: ENV["OPENAI_API_KEY"],
+  default_options: {
+    chat_completion_model_name: "gpt-3.5-turbo-16k"
+  }
+)
+
+client = Langchain::Vectorsearch::Qdrant.new(
+  url: ENV["QDRANT_URL"],
+  api_key: ENV["QDRANT_API_KEY"],
+  index_name: ENV["QDRANT_INDEX"],
+  llm: openai
+)
+
+client.llm.functions = functions
+client.llm.complete_response = true
+chat = client.ask(question: "create a users_controller")
+
+puts chat
data/lib/langchain/chunker/recursive_text.rb ADDED
@@ -0,0 +1,38 @@
+# frozen_string_literal: true
+
+require "baran"
+
+module Langchain
+  module Chunker
+    #
+    # Recursive text chunker. Preferentially splits on separators.
+    #
+    # Usage:
+    # Langchain::Chunker::RecursiveText.new(text).chunks
+    #
+    class RecursiveText < Base
+      attr_reader :text, :chunk_size, :chunk_overlap, :separators
+
+      # @param [String] text
+      # @param [Integer] chunk_size
+      # @param [Integer] chunk_overlap
+      # @param [Array<String>] separators
+      def initialize(text, chunk_size: 1000, chunk_overlap: 200, separators: ["\n\n"])
+        @text = text
+        @chunk_size = chunk_size
+        @chunk_overlap = chunk_overlap
+        @separators = separators
+      end
+
+      # @return [Array<String>]
+      def chunks
+        splitter = Baran::RecursiveCharacterTextSplitter.new(
+          chunk_size: chunk_size,
+          chunk_overlap: chunk_overlap,
+          separators: separators
+        )
+        splitter.chunks(text)
+      end
+    end
+  end
+end
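
A quick usage sketch of the new chunker (the input file, sizes and separators here are illustrative, not from the gem):

```ruby
require "langchain"

text = File.read("article.md") # hypothetical input file

# Prefer paragraph breaks, then sentence breaks, targeting ~512-character
# chunks with 64 characters of overlap between neighbours.
chunker = Langchain::Chunker::RecursiveText.new(
  text,
  chunk_size: 512,
  chunk_overlap: 64,
  separators: ["\n\n", ". "]
)

puts chunker.chunks.size
```

`chunks` delegates to `Baran::RecursiveCharacterTextSplitter`, so the shape of each returned chunk follows whatever that splitter yields.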
data/lib/langchain/conversation.rb CHANGED
@@ -37,6 +37,11 @@ module Langchain
       @block = block
     end
 
+    def set_functions(functions)
+      @llm.functions = functions
+      @llm.complete_response = true
+    end
+
     # Set the context of the conversation. Usually used to set the model's persona.
     # @param message [String] The context of the conversation
     def set_context(message)
data/lib/langchain/llm/openai.rb CHANGED
@@ -19,6 +19,8 @@ module Langchain::LLM
     }.freeze
     LENGTH_VALIDATOR = Langchain::Utils::TokenLength::OpenAIValidator
 
+    attr_accessor :functions, :complete_response
+
     def initialize(api_key:, llm_options: {}, default_options: {})
       depends_on "ruby-openai"
       require "openai"
@@ -115,20 +117,25 @@ module Langchain::LLM
 
       parameters = compose_parameters @defaults[:chat_completion_model_name], options
       parameters[:messages] = compose_chat_messages(prompt: prompt, messages: messages, context: context, examples: examples)
-      parameters[:max_tokens] = validate_max_tokens(parameters[:messages], parameters[:model])
+
+      if functions
+        parameters[:functions] = functions
+      else
+        parameters[:max_tokens] = validate_max_tokens(parameters[:messages], parameters[:model])
+      end
 
       if (streaming = block_given?)
         parameters[:stream] = proc do |chunk, _bytesize|
-          yield chunk.dig("choices", 0, "delta", "content")
+          yield chunk if complete_response
+          yield chunk.dig("choices", 0, "delta", "content") if !complete_response
         end
       end
 
       response = client.chat(parameters: parameters)
-
       raise Langchain::LLM::ApiError.new "Chat completion failed: #{response.dig("error", "message")}" if !response.empty? && response.dig("error")
-
       unless streaming
-        response.dig("choices", 0, "message", "content")
+        return response.dig("choices", 0, "message", "content") if !complete_response
+        return response if complete_response
       end
     end
 
data/lib/langchain/vectorsearch/base.rb CHANGED
@@ -161,12 +161,16 @@ module Langchain::Vectorsearch
     end
 
     def add_data(paths:)
-      raise ArgumentError, "Paths must be provided" if paths.to_a.empty?
+      raise ArgumentError, "Paths must be provided" if Array(paths).empty?
 
       texts = Array(paths)
         .flatten
-        .map { |path| Langchain::Loader.new(path)&.load&.value }
-        .compact
+        .map do |path|
+          data = Langchain::Loader.new(path)&.load&.chunks
+          data.map { |chunk| chunk[:text] }
+        end
+
+      texts.flatten!
 
       add_texts(texts: texts)
     end
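
`add_data` now gathers the chunk texts from each loaded document before indexing them. A usage sketch (the path is a hypothetical placeholder):

```ruby
# Loads the file, chunks it, embeds each chunk and indexes the texts.
client.add_data(paths: ["handbook.pdf"])
```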
data/lib/langchain/vectorsearch/chroma.rb CHANGED
@@ -113,8 +113,9 @@ module Langchain::Vectorsearch
 
     # Ask a question and return the answer
     # @param question [String] The question to ask
+    # @yield [String] Stream responses back one String at a time
     # @return [String] The answer to the question
-    def ask(question:)
+    def ask(question:, &block)
       search_results = similarity_search(query: question)
 
       context = search_results.map do |result|
@@ -125,7 +126,7 @@ module Langchain::Vectorsearch
 
       prompt = generate_prompt(question: question, context: context)
 
-      llm.chat(prompt: prompt)
+      llm.chat(prompt: prompt, &block)
     end
 
     private
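
This `&block` plumbing, repeated across the adapters below, is what enables the streaming `ask()` responses called out in the changelog. A minimal sketch, assuming a configured vectorsearch client such as the Weaviate one in the README:

```ruby
# Tokens print as the LLM streams them; without a block, ask() still
# returns the full answer as before.
client.ask(question: "What is langchainrb?") do |token|
  print(token)
end
```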
data/lib/langchain/vectorsearch/pgvector.rb CHANGED
@@ -40,20 +40,53 @@ module Langchain::Vectorsearch
       super(llm: llm)
     end
 
-    # Add a list of texts to the index
+    # Upsert a list of texts to the index
     # @param texts [Array<String>] The texts to add to the index
-    # @return [PG::Result] The response from the database
-    def add_texts(texts:)
-      data = texts.flat_map do |text|
-        [text, llm.embed(text: text)]
+    # @param ids [Array<Integer>] The ids of the objects to add to the index, in the same order as the texts
+    # @return [PG::Result] The response from the database including the ids of
+    #   the added or updated texts.
+    def upsert_texts(texts:, ids:)
+      data = texts.zip(ids).flat_map do |(text, id)|
+        [id, text, llm.embed(text: text)]
       end
-      values = texts.length.times.map { |i| "($#{2 * i + 1}, $#{2 * i + 2})" }.join(",")
+      values = texts.length.times.map { |i| "($#{3 * i + 1}, $#{3 * i + 2}, $#{3 * i + 3})" }.join(",")
+      # see https://github.com/pgvector/pgvector#storing
       client.exec_params(
-        "INSERT INTO #{quoted_table_name} (content, vectors) VALUES #{values};",
+        "INSERT INTO #{quoted_table_name} (id, content, vectors) VALUES
+        #{values} ON CONFLICT (id) DO UPDATE SET content = EXCLUDED.content, vectors = EXCLUDED.vectors RETURNING id;",
         data
       )
     end
 
+    # Add a list of texts to the index
+    # @param texts [Array<String>] The texts to add to the index
+    # @param ids [Array<String>] The ids to add to the index, in the same order as the texts
+    # @return [PG::Result] The response from the database including the ids of
+    #   the added texts.
+    def add_texts(texts:, ids: nil)
+      if ids.nil? || ids.empty?
+        data = texts.flat_map do |text|
+          [text, llm.embed(text: text)]
+        end
+        values = texts.length.times.map { |i| "($#{2 * i + 1}, $#{2 * i + 2})" }.join(",")
+        client.exec_params(
+          "INSERT INTO #{quoted_table_name} (content, vectors) VALUES #{values} RETURNING id;",
+          data
+        )
+      else
+        upsert_texts(texts: texts, ids: ids)
+      end
+    end
+
+    # Update a list of ids and corresponding texts to the index
+    # @param texts [Array<String>] The texts to add to the index
+    # @param ids [Array<String>] The ids to add to the index, in the same order as the texts
+    # @return [PG::Result] The response from the database including the ids of
+    #   the updated texts.
+    def update_texts(texts:, ids:)
+      upsert_texts(texts: texts, ids: ids)
+    end
+
     # Create default schema
     # @return [PG::Result] The response from the database
     def create_default_schema
@@ -103,8 +136,9 @@ module Langchain::Vectorsearch
 
     # Ask a question and return the answer
     # @param question [String] The question to ask
+    # @yield [String] Stream responses back one String at a time
     # @return [String] The answer to the question
-    def ask(question:)
+    def ask(question:, &block)
       search_results = similarity_search(query: question)
 
       context = search_results.map do |result|
@@ -114,7 +148,7 @@ module Langchain::Vectorsearch
 
       prompt = generate_prompt(question: question, context: context)
 
-      llm.chat(prompt: prompt)
+      llm.chat(prompt: prompt, &block)
     end
   end
 end
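
A short sketch of the new Pgvector id handling above (the ids and texts are illustrative):

```ruby
# Without ids: a plain INSERT, as before (now with RETURNING id).
client.add_texts(texts: ["first doc", "second doc"])

# With ids: upserts via ON CONFLICT (id) DO UPDATE.
client.add_texts(texts: ["first doc, revised"], ids: [1])

# update_texts reuses the same upsert path.
client.update_texts(texts: ["first doc, revised again"], ids: [1])
```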
data/lib/langchain/vectorsearch/pinecone.rb CHANGED
@@ -139,8 +139,9 @@ module Langchain::Vectorsearch
     # @param question [String] The question to ask
     # @param namespace [String] The namespace to search in
     # @param filter [String] The filter to use
+    # @yield [String] Stream responses back one String at a time
     # @return [String] The answer to the question
-    def ask(question:, namespace: "", filter: nil)
+    def ask(question:, namespace: "", filter: nil, &block)
       search_results = similarity_search(query: question, namespace: namespace, filter: filter)
 
       context = search_results.map do |result|
@@ -150,7 +151,7 @@ module Langchain::Vectorsearch
 
       prompt = generate_prompt(question: question, context: context)
 
-      llm.chat(prompt: prompt)
+      llm.chat(prompt: prompt, &block)
     end
 
     # Pinecone index
data/lib/langchain/vectorsearch/qdrant.rb CHANGED
@@ -112,8 +112,9 @@ module Langchain::Vectorsearch
 
     # Ask a question and return the answer
     # @param question [String] The question to ask
+    # @yield [String] Stream responses back one String at a time
     # @return [String] The answer to the question
-    def ask(question:)
+    def ask(question:, &block)
       search_results = similarity_search(query: question)
 
       context = search_results.map do |result|
@@ -123,7 +124,7 @@ module Langchain::Vectorsearch
 
       prompt = generate_prompt(question: question, context: context)
 
-      llm.chat(prompt: prompt)
+      llm.chat(prompt: prompt, &block)
     end
   end
 end
data/lib/langchain/vectorsearch/weaviate.rb CHANGED
@@ -124,8 +124,9 @@ module Langchain::Vectorsearch
 
     # Ask a question and return the answer
     # @param question [String] The question to ask
+    # @yield [String] Stream responses back one String at a time
     # @return [Hash] The answer
-    def ask(question:)
+    def ask(question:, &block)
       search_results = similarity_search(query: question)
 
       context = search_results.map do |result|
@@ -135,7 +136,7 @@ module Langchain::Vectorsearch
 
       prompt = generate_prompt(question: question, context: context)
 
-      llm.chat(prompt: prompt)
+      llm.chat(prompt: prompt, &block)
     end
 
     private
data/lib/langchain/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module Langchain
-  VERSION = "0.6.5"
+  VERSION = "0.6.7"
 end
data/lib/langchain.rb CHANGED
@@ -82,6 +82,7 @@ module Langchain
   module Chunker
     autoload :Base, "langchain/chunker/base"
     autoload :Text, "langchain/chunker/text"
+    autoload :RecursiveText, "langchain/chunker/recursive_text"
   end
 
   module Tool
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: langchainrb
 version: !ruby/object:Gem::Version
-  version: 0.6.5
+  version: 0.6.7
 platform: ruby
 authors:
 - Andrei Bondarev
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2023-07-06 00:00:00.000000000 Z
+date: 2023-07-20 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: baran
@@ -408,14 +408,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 4.0.0
+        version: 4.1.0
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 4.0.0
+        version: 4.1.0
 - !ruby/object:Gem::Dependency
   name: safe_ruby
   requirement: !ruby/object:Gem::Requirement
420
420
  name: safe_ruby
421
421
  requirement: !ruby/object:Gem::Requirement
@@ -493,6 +493,8 @@ files:
 - examples/create_and_manage_prompt_templates.rb
 - examples/create_and_manage_prompt_templates_using_structured_output_parser.rb
 - examples/llama_cpp.rb
+- examples/open_ai_function_calls.rb
+- examples/open_ai_qdrant_function_calls.rb
 - examples/pdf_store_and_query_with_chroma.rb
 - examples/store_and_query_with_pinecone.rb
 - examples/store_and_query_with_qdrant.rb
@@ -507,6 +509,7 @@ files:
 - lib/langchain/agent/sql_query_agent/sql_query_agent_answer_prompt.yaml
 - lib/langchain/agent/sql_query_agent/sql_query_agent_sql_prompt.yaml
 - lib/langchain/chunker/base.rb
+- lib/langchain/chunker/recursive_text.rb
 - lib/langchain/chunker/text.rb
 - lib/langchain/contextual_logger.rb
 - lib/langchain/conversation.rb
@@ -588,7 +591,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.2.3
+rubygems_version: 3.3.7
 signing_key:
 specification_version: 4
 summary: Build LLM-backed Ruby applications with Ruby's LangChain