langchainrb 0.6.13 → 0.6.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +7 -0
- data/lib/langchain/agent/react_agent.rb +2 -2
- data/lib/langchain/dependency_helper.rb +9 -2
- data/lib/langchain/llm/ai21.rb +0 -1
- data/lib/langchain/llm/anthropic.rb +0 -1
- data/lib/langchain/llm/cohere.rb +1 -2
- data/lib/langchain/llm/google_palm.rb +0 -1
- data/lib/langchain/llm/hugging_face.rb +1 -2
- data/lib/langchain/llm/llama_cpp.rb +0 -1
- data/lib/langchain/llm/openai.rb +51 -11
- data/lib/langchain/llm/replicate.rb +1 -2
- data/lib/langchain/loader.rb +2 -3
- data/lib/langchain/processors/docx.rb +0 -1
- data/lib/langchain/processors/html.rb +0 -1
- data/lib/langchain/processors/pdf.rb +0 -1
- data/lib/langchain/processors/xlsx.rb +0 -1
- data/lib/langchain/tool/calculator.rb +0 -1
- data/lib/langchain/tool/database.rb +0 -1
- data/lib/langchain/tool/google_search.rb +74 -9
- data/lib/langchain/tool/ruby_code_interpreter.rb +2 -2
- data/lib/langchain/tool/wikipedia.rb +1 -2
- data/lib/langchain/vectorsearch/chroma.rb +0 -1
- data/lib/langchain/vectorsearch/hnswlib.rb +0 -1
- data/lib/langchain/vectorsearch/milvus.rb +0 -1
- data/lib/langchain/vectorsearch/pgvector.rb +0 -2
- data/lib/langchain/vectorsearch/pinecone.rb +13 -1
- data/lib/langchain/vectorsearch/qdrant.rb +13 -2
- data/lib/langchain/vectorsearch/weaviate.rb +1 -2
- data/lib/langchain/version.rb +1 -1
- metadata +7 -7
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 7beb48b4b2bc88c4a25bef1cbc9eb0e95c0bd3eaeb02af6a12e0026c9081dd6d
+  data.tar.gz: ebdb7816bf0e37e221a13ecf6cb620a335d3d49b564be76f4f714cb9849ebeb6
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1cb2c147ffbe999eb1f027161e6cda3beea76e31b821bdec564eb36cc6a2d96e31c5d450be8d744738fabef07a9f519c8b96ab2e6dc9585fb05ceea7ebc494a2
+  data.tar.gz: 9f3a1d015de4f568bea1e08637a07ed6bf2ef93bb68068ebe51a50c16ca5a1d5d3f850cf19439ad785b6078305a7dfbd740f7bf7916c1e3466efdb04060f360e
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,12 @@
 ## [Unreleased]
 
+## [0.6.14] - 2023-09-11
+- Add `find` method to `Langchain::Vectorsearch::Qdrant`
+- Enhance Google search output
+- Raise ApiError when OpenAI returns an error
+- Update OpenAI `complete` method to use chat completion api
+- Deprecate legacy completion models. See https://platform.openai.com/docs/deprecations/2023-07-06-gpt-and-embeddings
+
 ## [0.6.13] - 2023-08-23
 - Add `k:` parameter to all `ask()` vector search methods
 - Bump Faraday to 2.x
data/lib/langchain/agent/react_agent.rb
CHANGED
@@ -83,9 +83,9 @@ module Langchain::Agent
           else
             "\nObservation: #{result}\nThought:"
           end
-
+        elsif response.include?("Final Answer:")
           # Return the final answer
-          final_response = response.
+          final_response = response.split("Final Answer:")[-1]
           break
         end
       end
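A quick illustration of the fixed parsing (the response string here is hypothetical, not from the source): splitting on the "Final Answer:" marker and taking the last segment yields the answer text.

    response = "Thought: I now know the final answer.\nFinal Answer: Paris"
    response.split("Final Answer:")[-1]
    # => " Paris"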
data/lib/langchain/dependency_helper.rb
CHANGED
@@ -2,6 +2,8 @@
 
 module Langchain
   module DependencyHelper
+    class LoadError < ::LoadError; end
+
     class VersionError < ScriptError; end
 
     # This method requires and loads the given gem, and then checks to see if the version of the gem meets the requirements listed in `langchain.gemspec`
@@ -12,7 +14,7 @@ module Langchain
     # @raise [LoadError] If the gem is not installed
     # @raise [VersionError] If the gem is installed, but the version does not meet the requirements
     #
-    def depends_on(gem_name)
+    def depends_on(gem_name, req: true)
       gem(gem_name) # require the gem
 
       return(true) unless defined?(Bundler) # If we're in a non-bundler environment, we're no longer able to determine if we'll meet requirements
@@ -26,8 +28,13 @@ module Langchain
         raise VersionError, "The #{gem_name} gem is installed, but version #{gem_requirement} is required. You have #{gem_version}."
       end
 
+      lib_name = gem_name if req == true
+      lib_name = req if req.is_a?(String)
+
+      require(lib_name) if lib_name
+
       true
-    rescue LoadError
+    rescue ::LoadError
       raise LoadError, "Could not load #{gem_name}. Please ensure that the #{gem_name} gem is installed."
    end
  end
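A usage sketch of the new req: keyword (the wrapper class here is hypothetical): req: defaults to true, which requires the gem under its own name; a String names a require path that differs from the gem name; any other value skips the require step.

    class MyWrapper
      include Langchain::DependencyHelper

      def initialize
        # Gem name and require path match:
        depends_on "pinecone"                    # verifies, then require "pinecone"
        # Gem name and require path differ:
        depends_on "cohere-ruby", req: "cohere"  # verifies, then require "cohere"
      end
    end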
data/lib/langchain/llm/ai21.rb
CHANGED
data/lib/langchain/llm/anthropic.rb
CHANGED
@@ -22,7 +22,6 @@ module Langchain::LLM
 
   def initialize(api_key:, llm_options: {}, default_options: {})
     depends_on "anthropic"
-    require "anthropic"
 
     @client = ::Anthropic::Client.new(access_token: api_key, **llm_options)
     @defaults = DEFAULTS.merge(default_options)
data/lib/langchain/llm/cohere.rb
CHANGED
@@ -20,8 +20,7 @@ module Langchain::LLM
   }.freeze
 
   def initialize(api_key:, default_options: {})
-    depends_on "cohere-ruby"
-    require "cohere"
+    depends_on "cohere-ruby", req: "cohere"
 
     @client = ::Cohere::Client.new(api_key: api_key)
     @defaults = DEFAULTS.merge(default_options)
data/lib/langchain/llm/hugging_face.rb
CHANGED
@@ -25,8 +25,7 @@ module Langchain::LLM
   # @param api_key [String] The API key to use
   #
   def initialize(api_key:)
-    depends_on "hugging-face"
-    require "hugging_face"
+    depends_on "hugging-face", req: "hugging_face"
 
     @client = ::HuggingFace::InferenceApi.new(api_token: api_key)
   end
data/lib/langchain/llm/openai.rb
CHANGED
@@ -12,11 +12,19 @@ module Langchain::LLM
   class OpenAI < Base
     DEFAULTS = {
       temperature: 0.0,
-      completion_model_name: "
+      completion_model_name: "gpt-3.5-turbo",
       chat_completion_model_name: "gpt-3.5-turbo",
       embeddings_model_name: "text-embedding-ada-002",
       dimension: 1536
     }.freeze
+
+    LEGACY_COMPLETION_MODELS = %w[
+      ada
+      babbage
+      curie
+      davinci
+    ].freeze
+
     LENGTH_VALIDATOR = Langchain::Utils::TokenLength::OpenAIValidator
     ROLE_MAPPING = {
       "ai" => "assistant",
@@ -26,8 +34,7 @@ module Langchain::LLM
     attr_accessor :functions
 
     def initialize(api_key:, llm_options: {}, default_options: {})
-      depends_on "ruby-openai"
-      require "openai"
+      depends_on "ruby-openai", req: "openai"
 
       @client = ::OpenAI::Client.new(access_token: api_key, **llm_options)
       @defaults = DEFAULTS.merge(default_options)
@@ -45,7 +52,10 @@ module Langchain::LLM
 
       validate_max_tokens(text, parameters[:model])
 
-      response =
+      response = with_api_error_handling do
+        client.embeddings(parameters: parameters.merge(params))
+      end
+
       response.dig("data").first.dig("embedding")
     end
 
@@ -59,11 +69,16 @@ module Langchain::LLM
     def complete(prompt:, **params)
       parameters = compose_parameters @defaults[:completion_model_name], params
 
-      parameters[:
-      parameters[:max_tokens] = validate_max_tokens(prompt, parameters[:model])
+      return legacy_complete(prompt, parameters) if is_legacy_model?(parameters[:model])
 
-
-
+      parameters[:messages] = compose_chat_messages(prompt: prompt)
+      parameters[:max_tokens] = validate_max_tokens(parameters[:messages], parameters[:model])
+
+      response = with_api_error_handling do
+        client.chat(parameters: parameters)
+      end
+
+      response.dig("choices", 0, "message", "content")
     end
 
     #
@@ -131,8 +146,10 @@ module Langchain::LLM
         end
       end
 
-      response =
-
+      response = with_api_error_handling do
+        client.chat(parameters: parameters)
+      end
+
       unless streaming
         message = response.dig("choices", 0, "message")
         content = message["content"]
@@ -158,6 +175,22 @@ module Langchain::LLM
 
     private
 
+    def is_legacy_model?(model)
+      LEGACY_COMPLETION_MODELS.any? { |legacy_model| model.include?(legacy_model) }
+    end
+
+    def legacy_complete(prompt, parameters)
+      Langchain.logger.warn "DEPRECATION WARNING: The model #{parameters[:model]} is deprecated. Please use gpt-3.5-turbo instead. Details: https://platform.openai.com/docs/deprecations/2023-07-06-gpt-and-embeddings"
+
+      parameters[:prompt] = prompt
+      parameters[:max_tokens] = validate_max_tokens(prompt, parameters[:model])
+
+      response = with_api_error_handling do
+        client.completions(parameters: parameters)
+      end
+      response.dig("choices", 0, "text")
+    end
+
     def compose_parameters(model, params)
       default_params = {model: model, temperature: @defaults[:temperature]}
 
@@ -166,7 +199,7 @@ module Langchain::LLM
       default_params.merge(params)
     end
 
-    def compose_chat_messages(prompt:, messages
+    def compose_chat_messages(prompt:, messages: [], context: "", examples: [])
       history = []
 
       history.concat transform_messages(examples) unless examples.empty?
@@ -198,6 +231,13 @@ module Langchain::LLM
       end
     end
 
+    def with_api_error_handling
+      response = yield
+      raise Langchain::LLM::ApiError.new "OpenAI API error: #{response.dig("error", "message")}" if response&.dig("error")
+
+      response
+    end
+
     def validate_max_tokens(messages, model)
       LENGTH_VALIDATOR.validate_max_tokens!(messages, model)
     end
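A hedged usage sketch of the new error handling (model and prompt are illustrative): embed, complete, and chat now route through with_api_error_handling, so a response body containing an "error" key raises Langchain::LLM::ApiError instead of failing later on a nil dig.

    llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])

    begin
      # complete now calls the chat completions API; ada/babbage/curie/davinci
      # models fall back to legacy_complete with a deprecation warning.
      llm.complete(prompt: "Two plus two is")
    rescue Langchain::LLM::ApiError => e
      puts e.message  # "OpenAI API error: ..."
    end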
data/lib/langchain/llm/replicate.rb
CHANGED
@@ -33,8 +33,7 @@ module Langchain::LLM
   # @param api_key [String] The API key to use
   #
   def initialize(api_key:, default_options: {})
-    depends_on "replicate-ruby"
-    require "replicate"
+    depends_on "replicate-ruby", req: "replicate"
 
     ::Replicate.configure do |config|
       config.api_token = api_key
data/lib/langchain/loader.rb
CHANGED
@@ -98,8 +98,8 @@ module Langchain
       Dir.glob(File.join(@path, "**/*")).map do |file|
         # Only load and add to result files with supported extensions
         Langchain::Loader.new(file, @options).load(&block)
-      rescue
-        UnknownFormatError
+      rescue
+        UnknownFormatError nil
       end.flatten.compact
     end
 
@@ -134,7 +134,6 @@ module Langchain
     end
 
     def source_type
-      binding.pry
       url? ? @raw_data.content_type : File.extname(@path)
     end
 
data/lib/langchain/tool/google_search.rb
CHANGED
@@ -3,7 +3,7 @@
 module Langchain::Tool
   class GoogleSearch < Base
     #
-    # Wrapper around Google
+    # Wrapper around SerpApi's Google Search API
     #
     # Gem requirements: gem "google_search_results", "~> 2.0.0"
     #
@@ -15,7 +15,7 @@ module Langchain::Tool
     NAME = "google_search"
 
     description <<~DESC
-      A wrapper around Google Search.
+      A wrapper around SerpApi's Google Search API.
 
       Useful for when you need to answer questions about current events.
      Always one of the first options when you need to find information on internet.
@@ -33,7 +33,7 @@ module Langchain::Tool
     #
     def initialize(api_key:)
       depends_on "google_search_results"
-
+
       @api_key = api_key
     end
 
@@ -56,13 +56,78 @@ module Langchain::Tool
     def execute(input:)
       Langchain.logger.info("Executing \"#{input}\"", for: self.class)
 
-      hash_results = execute_search(input: input)
+      results = execute_search(input: input)
+
+      answer_box = results[:answer_box_list] ? results[:answer_box_list].first : results[:answer_box]
+      if answer_box
+        return answer_box[:result] ||
+            answer_box[:answer] ||
+            answer_box[:snippet] ||
+            answer_box[:snippet_highlighted_words] ||
+            answer_box.reject { |_k, v| v.is_a?(Hash) || v.is_a?(Array) || v.start_with?("http") }
+      elsif (events_results = results[:events_results])
+        return events_results.take(10)
+      elsif (sports_results = results[:sports_results])
+        return sports_results
+      elsif (top_stories = results[:top_stories])
+        return top_stories
+      elsif (news_results = results[:news_results])
+        return news_results
+      elsif (jobs_results = results.dig(:jobs_results, :jobs))
+        return jobs_results
+      elsif (shopping_results = results[:shopping_results]) && shopping_results.first.key?(:title)
+        return shopping_results.take(3)
+      elsif (questions_and_answers = results[:questions_and_answers])
+        return questions_and_answers
+      elsif (popular_destinations = results.dig(:popular_destinations, :destinations))
+        return popular_destinations
+      elsif (top_sights = results.dig(:top_sights, :sights))
+        return top_sights
+      elsif (images_results = results[:images_results]) && images_results.first.key?(:thumbnail)
+        return images_results.map { |h| h[:thumbnail] }.take(10)
+      end
+
+      snippets = []
+      if (knowledge_graph = results[:knowledge_graph])
+        snippets << knowledge_graph[:description] if knowledge_graph[:description]
+
+        title = knowledge_graph[:title] || ""
+        knowledge_graph.each do |k, v|
+          if v.is_a?(String) &&
+              k != :title &&
+              k != :description &&
+              !k.to_s.end_with?("_stick") &&
+              !k.to_s.end_with?("_link") &&
+              !k.to_s.start_with?("http")
+            snippets << "#{title} #{k}: #{v}"
+          end
+        end
+      end
+
+      if (first_organic_result = results.dig(:organic_results, 0))
+        if (snippet = first_organic_result[:snippet])
+          snippets << snippet
+        elsif (snippet_highlighted_words = first_organic_result[:snippet_highlighted_words])
+          snippets << snippet_highlighted_words
+        elsif (rich_snippet = first_organic_result[:rich_snippet])
+          snippets << rich_snippet
+        elsif (rich_snippet_table = first_organic_result[:rich_snippet_table])
+          snippets << rich_snippet_table
+        elsif (link = first_organic_result[:link])
+          snippets << link
+        end
+      end
+
+      if (buying_guide = results[:buying_guide])
+        snippets << buying_guide
+      end
+
+      if (local_results = results.dig(:local_results, :places))
+        snippets << local_results
+      end
 
-
-
-      hash_results.dig(:answer_box, :answer) ||
-        hash_results.dig(:answer_box, :snippet) ||
-        hash_results.dig(:organic_results, 0, :snippet)
+      return "No good search result found" if snippets.empty?
+      snippets
     end
 
     #
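A usage sketch of the enhanced tool (query and environment variable name are illustrative): execute now returns the most specific SerpApi section available (answer box, events, sports, top stories, news, jobs, shopping, Q&A, destinations, sights, or image thumbnails), and otherwise builds an array of snippets from the knowledge graph and the first organic result.

    search = Langchain::Tool::GoogleSearch.new(api_key: ENV["SERPAPI_API_KEY"])

    search.execute(input: "how tall is the Eiffel Tower?")
    # => an answer-box value such as "330 m", or an array of snippets;
    #    "No good search result found" when nothing usable comes back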
data/lib/langchain/vectorsearch/pgvector.rb
CHANGED
@@ -26,9 +26,7 @@ module Langchain::Vectorsearch
   # @param namespace [String] The namespace to use for the index when inserting/querying
   def initialize(url:, index_name:, llm:, namespace: nil)
     depends_on "sequel"
-    require "sequel"
     depends_on "pgvector"
-    require "pgvector"
 
     @db = Sequel.connect(url)
 
data/lib/langchain/vectorsearch/pinecone.rb
CHANGED
@@ -18,7 +18,6 @@ module Langchain::Vectorsearch
   # @param llm [Object] The LLM client to use
   def initialize(environment:, api_key:, index_name:, llm:)
     depends_on "pinecone"
-    require "pinecone"
 
     ::Pinecone.configure do |config|
       config.api_key = api_key
@@ -31,6 +30,19 @@ module Langchain::Vectorsearch
     super(llm: llm)
   end
 
+  # Find records by ids
+  # @param ids [Array] The ids to find
+  # @param namespace String The namespace to search through
+  # @return [Hash] The response from the server
+  def find(ids: [], namespace: "")
+    raise ArgumentError, "Ids must be provided" if Array(ids).empty?
+
+    client.index(index_name).fetch(
+      ids: ids,
+      namespace: namespace
+    )
+  end
+
   # Add a list of texts to the index
   # @param texts [Array] The list of texts to add
   # @param ids [Array] The list of IDs to add
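A usage sketch of the new Pinecone find method (index name and ids are hypothetical): it wraps the pinecone gem's fetch call and raises ArgumentError when no ids are given.

    llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
    store = Langchain::Vectorsearch::Pinecone.new(
      environment: ENV["PINECONE_ENVIRONMENT"],
      api_key: ENV["PINECONE_API_KEY"],
      index_name: "documents",
      llm: llm
    )

    store.find(ids: ["doc-1", "doc-2"], namespace: "production")
    # => the raw fetch response from the server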
data/lib/langchain/vectorsearch/qdrant.rb
CHANGED
@@ -17,8 +17,7 @@ module Langchain::Vectorsearch
   # @param index_name [String] The name of the index to use
   # @param llm [Object] The LLM client to use
   def initialize(url:, api_key:, index_name:, llm:)
-    depends_on "qdrant-ruby"
-    require "qdrant"
+    depends_on "qdrant-ruby", req: "qdrant"
 
     @client = ::Qdrant::Client.new(
       url: url,
@@ -29,6 +28,18 @@ module Langchain::Vectorsearch
     super(llm: llm)
   end
 
+  # Find records by ids
+  # @param ids [Array] The ids to find
+  # @return [Hash] The response from the server
+  def find(ids: [])
+    client.points.get_all(
+      collection_name: index_name,
+      ids: ids,
+      with_payload: true,
+      with_vector: true
+    )
+  end
+
   # Add a list of texts to the index
   # @param texts [Array] The list of texts to add
   # @return [Hash] The response from the server
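A usage sketch of the Qdrant find method (collection name and ids are hypothetical): it retrieves the given points, including payloads and vectors, through the qdrant-ruby client.

    llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
    store = Langchain::Vectorsearch::Qdrant.new(
      url: ENV["QDRANT_URL"],
      api_key: ENV["QDRANT_API_KEY"],
      index_name: "documents",
      llm: llm
    )

    store.find(ids: [1, 2, 3])
    # => the raw get_all response from the server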
data/lib/langchain/vectorsearch/weaviate.rb
CHANGED
@@ -17,8 +17,7 @@ module Langchain::Vectorsearch
   # @param index_name [String] The capitalized name of the index to use
   # @param llm [Object] The LLM client to use
   def initialize(url:, api_key:, index_name:, llm:)
-    depends_on "weaviate-ruby"
-    require "weaviate"
+    depends_on "weaviate-ruby", req: "weaviate"
 
     @client = ::Weaviate::Client.new(
       url: url,
data/lib/langchain/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: langchainrb
 version: !ruby/object:Gem::Version
-  version: 0.6.13
+  version: 0.6.14
 platform: ruby
 authors:
 - Andrei Bondarev
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2023-
+date: 2023-09-11 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: baran
@@ -16,14 +16,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.1.
+        version: 0.1.8
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.1.
+        version: 0.1.8
 - !ruby/object:Gem::Dependency
   name: colorize
   requirement: !ruby/object:Gem::Requirement
@@ -408,14 +408,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.9.
+        version: 0.9.4
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.9.
+        version: 0.9.4
 - !ruby/object:Gem::Dependency
   name: roo
   requirement: !ruby/object:Gem::Requirement
@@ -606,7 +606,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.
+rubygems_version: 3.2.33
 signing_key:
 specification_version: 4
 summary: Build LLM-backed Ruby applications with Ruby's LangChain