langchainrb 0.13.4 → 0.13.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +5 -0
- data/README.md +1 -1
- data/lib/langchain/assistants/assistant.rb +118 -64
- data/lib/langchain/assistants/messages/base.rb +35 -1
- data/lib/langchain/llm/google_gemini.rb +17 -3
- data/lib/langchain/llm/replicate.rb +1 -1
- data/lib/langchain/loader.rb +3 -1
- data/lib/langchain/utils/hash_transformer.rb +25 -0
- data/lib/langchain/vectorsearch/chroma.rb +3 -1
- data/lib/langchain/vectorsearch/milvus.rb +17 -2
- data/lib/langchain/version.rb +1 -1
- metadata +6 -5
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d7eac7a6ba7767f6a3f84ee808fa4810eaa1843776695ab0225ddd6b77cf7a73
+  data.tar.gz: e9f7c0170fc2a8dbf443f1bac24874878ee0fbba7e0495bf65a8df969d3d86e6
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: e4d14ac64e54e5c7245a9586dfb4899154793ea466f9564a510eb3dfe17a3a7229cf61e408445b38fec37500065b5e1ee725afa634284bea5538abac0766237f
+  data.tar.gz: e8fe3e1639a3f2ed087436610dd1653e775703c1c6cc83f7f52eb7d3fb46db554e7be790bc6bc2ddf18ec4e3c26dddbe1ec72e8f25603db1192e5a111d0f9543
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,10 @@
 ## [Unreleased]
 
+## [0.13.5] - 2024-07-01
+- Add Milvus#remove_texts() method
+- Langchain::Assistant has a `state` now
+- Misc fixes and improvements
+
 ## [0.13.4] - 2024-06-16
 - Fix Chroma#remove_texts() method
 - Fix NewsRetriever Tool returning non UTF-8 characters
data/README.md
CHANGED
@@ -343,7 +343,7 @@ You can instantiate any other supported vector search database:
 client = Langchain::Vectorsearch::Chroma.new(...) # `gem "chroma-db", "~> 0.6.0"`
 client = Langchain::Vectorsearch::Epsilla.new(...) # `gem "epsilla-ruby", "~> 0.0.3"`
 client = Langchain::Vectorsearch::Hnswlib.new(...) # `gem "hnswlib", "~> 0.8.1"`
-client = Langchain::Vectorsearch::Milvus.new(...) # `gem "milvus", "~> 0.9.
+client = Langchain::Vectorsearch::Milvus.new(...) # `gem "milvus", "~> 0.9.3"`
 client = Langchain::Vectorsearch::Pinecone.new(...) # `gem "pinecone", "~> 0.1.6"`
 client = Langchain::Vectorsearch::Pgvector.new(...) # `gem "pgvector", "~> 0.2"`
 client = Langchain::Vectorsearch::Qdrant.new(...) # `gem "qdrant-ruby", "~> 0.9.3"`
data/lib/langchain/assistants/assistant.rb
CHANGED
@@ -15,7 +15,7 @@ module Langchain
     extend Forwardable
     def_delegators :thread, :messages, :messages=
 
-    attr_reader :llm, :thread, :instructions
+    attr_reader :llm, :thread, :instructions, :state
     attr_accessor :tools
 
     SUPPORTED_LLMS = [
@@ -46,6 +46,7 @@ module Langchain
       @thread = thread || Langchain::Thread.new
       @tools = tools
       @instructions = instructions
+      @state = :ready
 
       raise ArgumentError, "Thread must be an instance of Langchain::Thread" unless @thread.is_a?(Langchain::Thread)
 
@@ -66,7 +67,10 @@ module Langchain
     # @return [Array<Langchain::Message>] The messages in the thread
     def add_message(content: nil, role: "user", tool_calls: [], tool_call_id: nil)
       message = build_message(role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id)
-      thread.add_message(message)
+      messages = thread.add_message(message)
+      @state = :ready
+
+      messages
     end
 
     # Run the assistant
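Note that add_message now returns the updated message list and resets the assistant to :ready, so a finished run can be continued by appending another message and calling run again. A minimal usage sketch (illustrative only; assumes an already-configured assistant):

    assistant.add_message(content: "And what about tomorrow?")
    assistant.state # => :ready
    assistant.run(auto_tool_execution: true)
    assistant.state # => :completed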
@@ -76,56 +80,12 @@ module Langchain
     def run(auto_tool_execution: false)
       if thread.messages.empty?
         Langchain.logger.warn("No messages in the thread")
+        @state = :completed
         return
       end
 
-      running = true
-
-      while running
-        # TODO: I think we need to look at all messages and not just the last one.
-        last_message = thread.messages.last
-
-        if last_message.system?
-          # Do nothing
-          running = false
-        elsif last_message.llm?
-          if last_message.tool_calls.any?
-            if auto_tool_execution
-              run_tools(last_message.tool_calls)
-            else
-              # Maybe log and tell the user that there's outstanding tool calls?
-              running = false
-            end
-          else
-            # Last message was from the assistant without any tools calls.
-            # Do nothing
-            running = false
-          end
-        elsif last_message.user?
-          # Run it!
-          response = chat_with_llm
-
-          if response.tool_calls.any?
-            # Re-run the while(running) loop to process the tool calls
-            running = true
-            add_message(role: response.role, tool_calls: response.tool_calls)
-          elsif response.chat_completion
-            # Stop the while(running) loop and add the assistant's response to the thread
-            running = false
-            add_message(role: response.role, content: response.chat_completion)
-          end
-        elsif last_message.tool?
-          # Run it!
-          response = chat_with_llm
-          running = true
-
-          if response.tool_calls.any?
-            add_message(role: response.role, tool_calls: response.tool_calls)
-          elsif response.chat_completion
-            add_message(role: response.role, content: response.chat_completion)
-          end
-        end
-      end
+      @state = :in_progress
+      @state = handle_state until run_finished?(auto_tool_execution)
 
       thread.messages
     end
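The hand-rolled while/if cascade is replaced by a small state machine: run marks the assistant :in_progress, then lets handle_state compute the next state until run_finished? reports a terminal condition. Terminal conditions are :completed, :failed, or :requires_action when auto_tool_execution is false (pending tool calls the caller must execute manually). An illustrative trace of a tool-calling turn (not code from the gem):

    # :in_progress     -- LLM responds with tool calls --> :requires_action
    # :requires_action -- execute_tools (auto mode)    --> :in_progress
    # :in_progress     -- LLM responds with completion --> :completed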
@@ -146,13 +106,7 @@ module Langchain
     # @param output [String] The output of the tool
     # @return [Array<Langchain::Message>] The messages in the thread
     def submit_tool_output(tool_call_id:, output:)
-      tool_role = if llm.is_a?(Langchain::LLM::OpenAI)
-        Langchain::Messages::OpenAIMessage::TOOL_ROLE
-      elsif [Langchain::LLM::GoogleGemini, Langchain::LLM::GoogleVertexAI].include?(llm.class)
-        Langchain::Messages::GoogleGeminiMessage::TOOL_ROLE
-      elsif llm.is_a?(Langchain::LLM::Anthropic)
-        Langchain::Messages::AnthropicMessage::TOOL_ROLE
-      end
+      tool_role = determine_tool_role
 
       # TODO: Validate that `tool_call_id` is valid by scanning messages and checking if this tool call ID was invoked
       add_message(role: tool_role, content: output, tool_call_id: tool_call_id)
@@ -183,6 +137,114 @@ module Langchain
 
     private
 
+    # Check if the run is finished
+    #
+    # @param auto_tool_execution [Boolean] Whether or not to automatically run tools
+    # @return [Boolean] Whether the run is finished
+    def run_finished?(auto_tool_execution)
+      finished_states = [:completed, :failed]
+
+      requires_manual_action = (@state == :requires_action) && !auto_tool_execution
+      finished_states.include?(@state) || requires_manual_action
+    end
+
+    # Handle the current state and transition to the next state
+    #
+    # @param state [Symbol] The current state
+    # @return [Symbol] The next state
+    def handle_state
+      case @state
+      when :in_progress
+        process_latest_message
+      when :requires_action
+        execute_tools
+      end
+    end
+
+    # Process the latest message in the thread
+    #
+    # @return [Symbol] The next state
+    def process_latest_message
+      last_message = thread.messages.last
+
+      case last_message.standard_role
+      when :system
+        handle_system_message
+      when :llm
+        handle_llm_message
+      when :user, :tool
+        handle_user_or_tool_message
+      else
+        handle_unexpected_message
+      end
+    end
+
+    # Handle system message scenario
+    #
+    # @return [Symbol] The completed state
+    def handle_system_message
+      Langchain.logger.warn("At least one user message is required after a system message")
+      :completed
+    end
+
+    # Handle LLM message scenario
+    #
+    # @param auto_tool_execution [Boolean] Flag to indicate if tools should be executed automatically
+    # @return [Symbol] The next state
+    def handle_llm_message
+      thread.messages.last.tool_calls.any? ? :requires_action : :completed
+    end
+
+    # Handle unexpected message scenario
+    #
+    # @return [Symbol] The failed state
+    def handle_unexpected_message
+      Langchain.logger.error("Unexpected message role encountered: #{thread.messages.last.standard_role}")
+      :failed
+    end
+
+    # Handle user or tool message scenario by processing the LLM response
+    #
+    # @return [Symbol] The next state
+    def handle_user_or_tool_message
+      response = chat_with_llm
+      add_message(role: response.role, content: response.chat_completion, tool_calls: response.tool_calls)
+
+      if response.tool_calls.any?
+        :in_progress
+      elsif response.chat_completion
+        :completed
+      else
+        Langchain.logger.error("LLM response does not contain tool calls or chat completion")
+        :failed
+      end
+    end
+
+    # Execute the tools based on the tool calls in the last message
+    #
+    # @return [Symbol] The next state
+    def execute_tools
+      run_tools(thread.messages.last.tool_calls)
+      :in_progress
+    rescue => e
+      Langchain.logger.error("Error running tools: #{e.message}")
+      :failed
+    end
+
+    # Determine the tool role based on the LLM type
+    #
+    # @return [String] The tool role
+    def determine_tool_role
+      case llm
+      when Langchain::LLM::OpenAI
+        Langchain::Messages::OpenAIMessage::TOOL_ROLE
+      when Langchain::LLM::GoogleGemini, Langchain::LLM::GoogleVertexAI
+        Langchain::Messages::GoogleGeminiMessage::TOOL_ROLE
+      when Langchain::LLM::Anthropic
+        Langchain::Messages::AnthropicMessage::TOOL_ROLE
+      end
+    end
+
     # Call to the LLM#chat() method
     #
     # @return [Langchain::LLM::BaseResponse] The LLM response object
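Taken together, these private methods make the assistant's lifecycle observable from the outside. A hedged usage sketch (llm and weather_tool are hypothetical placeholders; return values shown as comments):

    assistant = Langchain::Assistant.new(llm: llm, tools: [weather_tool])
    assistant.add_message(content: "What's the weather in Boston?")

    assistant.run(auto_tool_execution: false)
    assistant.state # => :requires_action (a tool call is pending)

    assistant.run(auto_tool_execution: true)
    assistant.state # => :completed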
@@ -232,14 +294,6 @@ module Langchain
 
         submit_tool_output(tool_call_id: tool_call_id, output: output)
       end
-
-      response = chat_with_llm
-
-      if response.tool_calls.any?
-        add_message(role: response.role, tool_calls: response.tool_calls)
-      elsif response.chat_completion
-        add_message(role: response.role, content: response.chat_completion)
-      end
     end
 
     # Extract the tool call information from the OpenAI tool call hash
data/lib/langchain/assistants/messages/base.rb
CHANGED
@@ -7,10 +7,44 @@ module Langchain
 
       # Check if the message came from a user
      #
-      # @
+      # @return [Boolean] true/false whether the message came from a user
       def user?
         role == "user"
       end
+
+      # Check if the message came from an LLM
+      #
+      # @raise NotImplementedError if the subclass does not implement this method
+      def llm?
+        raise NotImplementedError, "Class #{self.class.name} must implement the method 'llm?'"
+      end
+
+      # Check if the message is a tool call
+      #
+      # @raise NotImplementedError if the subclass does not implement this method
+      def tool?
+        raise NotImplementedError, "Class #{self.class.name} must implement the method 'tool?'"
+      end
+
+      # Check if the message is a system prompt
+      #
+      # @raise NotImplementedError if the subclass does not implement this method
+      def system?
+        raise NotImplementedError, "Class #{self.class.name} must implement the method 'system?'"
+      end
+
+      # Returns the standardized role symbol based on the specific role methods
+      #
+      # @return [Symbol] the standardized role symbol (:system, :llm, :tool, :user, or :unknown)
+      def standard_role
+        return :user if user?
+        return :llm if llm?
+        return :tool if tool?
+        return :system if system?
+
+        # TODO: Should we return :unknown or raise an error?
+        :unknown
+      end
     end
   end
 end
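standard_role is what lets the Assistant's new state machine branch on message origin without knowing which provider produced the message: each concrete message class only has to answer the role predicates. A minimal conforming subclass (hypothetical; the gem's OpenAIMessage, AnthropicMessage and GoogleGeminiMessage follow this pattern):

    class DemoMessage < Langchain::Messages::Base
      attr_reader :role

      def initialize(role)
        @role = role
      end

      def llm?
        role == "assistant"
      end

      def tool?
        role == "tool"
      end

      def system?
        role == "system"
      end
    end

    DemoMessage.new("assistant").standard_role # => :llm
    DemoMessage.new("user").standard_role      # => :user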
data/lib/langchain/llm/google_gemini.rb
CHANGED
@@ -18,7 +18,9 @@ module Langchain::LLM
 
       chat_parameters.update(
         model: {default: @defaults[:chat_completion_model_name]},
-        temperature: {default: @defaults[:temperature]}
+        temperature: {default: @defaults[:temperature]},
+        generation_config: {default: nil},
+        safety_settings: {default: nil}
       )
       chat_parameters.remap(
         messages: :contents,
@@ -42,13 +44,25 @@ module Langchain::LLM
       raise ArgumentError.new("messages argument is required") if Array(params[:messages]).empty?
 
       parameters = chat_parameters.to_params(params)
-      parameters[:generation_config]
+      parameters[:generation_config] ||= {}
+      parameters[:generation_config][:temperature] ||= parameters[:temperature] if parameters[:temperature]
+      parameters.delete(:temperature)
+      parameters[:generation_config][:top_p] ||= parameters[:top_p] if parameters[:top_p]
+      parameters.delete(:top_p)
+      parameters[:generation_config][:top_k] ||= parameters[:top_k] if parameters[:top_k]
+      parameters.delete(:top_k)
+      parameters[:generation_config][:max_output_tokens] ||= parameters[:max_tokens] if parameters[:max_tokens]
+      parameters.delete(:max_tokens)
+      parameters[:generation_config][:response_mime_type] ||= parameters[:response_format] if parameters[:response_format]
+      parameters.delete(:response_format)
+      parameters[:generation_config][:stop_sequences] ||= parameters[:stop] if parameters[:stop]
+      parameters.delete(:stop)
 
       uri = URI("https://generativelanguage.googleapis.com/v1beta/models/#{parameters[:model]}:generateContent?key=#{api_key}")
 
       request = Net::HTTP::Post.new(uri)
       request.content_type = "application/json"
-      request.body = parameters.to_json
+      request.body = Langchain::Utils::HashTransformer.deep_transform_keys(parameters) { |key| Langchain::Utils::HashTransformer.camelize_lower(key.to_s).to_sym }.to_json
 
       response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: uri.scheme == "https") do |http|
         http.request(request)
|
|
64
64
|
# Generate a completion for a given prompt
|
65
65
|
#
|
66
66
|
# @param prompt [String] The prompt to generate a completion for
|
67
|
-
# @return [Langchain::LLM::ReplicateResponse]
|
67
|
+
# @return [Langchain::LLM::ReplicateResponse] Response object
|
68
68
|
#
|
69
69
|
def complete(prompt:, **params)
|
70
70
|
response = completion_model.predict(prompt: prompt)
|
data/lib/langchain/loader.rb
CHANGED
@@ -90,7 +90,9 @@ module Langchain
     private
 
     def load_from_url
-      URI.
+      unescaped_url = URI.decode_www_form_component(@path)
+      escaped_url = URI::DEFAULT_PARSER.escape(unescaped_url)
+      URI.parse(escaped_url).open
     end
 
     def load_from_path
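Decoding first and then re-escaping normalizes the URL whether the caller passes a raw or an already percent-encoded string, instead of letting URI parsing fail on characters like spaces. The round-trip, using the same stdlib calls:

    URI.decode_www_form_component("https://example.com/my%20file.pdf")
    # => "https://example.com/my file.pdf"
    URI::DEFAULT_PARSER.escape("https://example.com/my file.pdf")
    # => "https://example.com/my%20file.pdf"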
data/lib/langchain/utils/hash_transformer.rb
ADDED
@@ -0,0 +1,25 @@
+module Langchain
+  module Utils
+    class HashTransformer
+      # Converts a string to camelCase
+      def self.camelize_lower(str)
+        str.split("_").inject([]) { |buffer, e| buffer.push(buffer.empty? ? e : e.capitalize) }.join
+      end
+
+      # Recursively transforms the keys of a hash to camel case
+      def self.deep_transform_keys(hash, &block)
+        case hash
+        when Hash
+          hash.each_with_object({}) do |(key, value), result|
+            new_key = block.call(key)
+            result[new_key] = deep_transform_keys(value, &block)
+          end
+        when Array
+          hash.map { |item| deep_transform_keys(item, &block) }
+        else
+          hash
+        end
+      end
+    end
+  end
+end
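A usage sketch of the new helper, mirroring the call site in google_gemini.rb above:

    input = {generation_config: {max_output_tokens: 256}, safety_settings: []}
    Langchain::Utils::HashTransformer.deep_transform_keys(input) do |key|
      Langchain::Utils::HashTransformer.camelize_lower(key.to_s).to_sym
    end
    # => {generationConfig: {maxOutputTokens: 256}, safetySettings: []}

The block receives each key, so the camelization strategy stays at the call site rather than being hard-coded into the transformer.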
data/lib/langchain/vectorsearch/chroma.rb
CHANGED
@@ -64,7 +64,9 @@ module Langchain::Vectorsearch
     # @param ids [Array<String>] The list of ids to remove
     # @return [Hash] The response from the server
     def remove_texts(ids:)
-      collection.delete(
+      collection.delete(
+        ids: ids.map(&:to_s)
+      )
     end
 
     # Create the collection with the default schema
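The fix coerces every id to a String before handing the list to the chroma-db client. Usage is unchanged (sketch; connection details are hypothetical):

    chroma = Langchain::Vectorsearch::Chroma.new(url: "http://localhost:8000", index_name: "docs", llm: llm)
    chroma.remove_texts(ids: [1, 2]) # sent to Chroma as ["1", "2"]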
data/lib/langchain/vectorsearch/milvus.rb
CHANGED
@@ -6,7 +6,7 @@ module Langchain::Vectorsearch
   # Wrapper around Milvus REST APIs.
   #
   # Gem requirements:
-  #     gem "milvus", "~> 0.9.
+  #     gem "milvus", "~> 0.9.3"
   #
   # Usage:
   #     milvus = Langchain::Vectorsearch::Milvus.new(url:, index_name:, llm:, api_key:)
@@ -39,6 +39,21 @@ module Langchain::Vectorsearch
       )
     end
 
+    # Deletes a list of texts in the index
+    #
+    # @param ids [Array<Integer>] The ids of texts to delete
+    # @return [Boolean] The response from the server
+    def remove_texts(ids:)
+      raise ArgumentError, "ids must be an array" unless ids.is_a?(Array)
+      # Convert ids to integers if strings are passed
+      ids = ids.map(&:to_i)
+
+      client.entities.delete(
+        collection_name: index_name,
+        expression: "id in #{ids}"
+      )
+    end
+
     # TODO: Add update_texts method
 
     # Create default schema
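A usage sketch of the new method (connection details are hypothetical). String ids are coerced to integers and interpolated into a Milvus boolean expression:

    milvus = Langchain::Vectorsearch::Milvus.new(url: "http://localhost:19530", index_name: "docs", llm: llm)
    milvus.remove_texts(ids: ["1", "2"]) # deletes entities matching "id in [1, 2]"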
@@ -83,7 +98,7 @@ module Langchain::Vectorsearch
     # @return [Boolean] The response from the server
     def create_default_index
       client.indices.create(
-        collection_name:
+        collection_name: index_name,
         field_name: "vectors",
         extra_params: [
           {key: "metric_type", value: "L2"},
data/lib/langchain/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: langchainrb
 version: !ruby/object:Gem::Version
-  version: 0.13.
+  version: 0.13.5
 platform: ruby
 authors:
 - Andrei Bondarev
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-
+date: 2024-07-01 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: baran
@@ -408,14 +408,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.9.
+        version: 0.9.3
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.9.
+        version: 0.9.3
 - !ruby/object:Gem::Dependency
   name: llama_cpp
   requirement: !ruby/object:Gem::Requirement
@@ -809,6 +809,7 @@ files:
 - lib/langchain/tool/wikipedia/wikipedia.json
 - lib/langchain/tool/wikipedia/wikipedia.rb
 - lib/langchain/utils/cosine_similarity.rb
+- lib/langchain/utils/hash_transformer.rb
 - lib/langchain/utils/token_length/ai21_validator.rb
 - lib/langchain/utils/token_length/base_validator.rb
 - lib/langchain/utils/token_length/cohere_validator.rb
@@ -852,7 +853,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.5.
+rubygems_version: 3.5.14
 signing_key:
 specification_version: 4
 summary: Build LLM-backed Ruby applications with Ruby's Langchain.rb