langchainrb 0.13.2 → 0.13.4
- checksums.yaml +4 -4
- data/CHANGELOG.md +10 -0
- data/README.md +7 -4
- data/lib/langchain/assistants/assistant.rb +16 -4
- data/lib/langchain/contextual_logger.rb +2 -2
- data/lib/langchain/llm/aws_bedrock.rb +62 -9
- data/lib/langchain/llm/ollama.rb +63 -30
- data/lib/langchain/llm/openai.rb +21 -10
- data/lib/langchain/llm/response/anthropic_response.rb +1 -1
- data/lib/langchain/llm/response/ollama_response.rb +12 -8
- data/lib/langchain/loader.rb +3 -3
- data/lib/langchain/tool/news_retriever/news_retriever.rb +4 -1
- data/lib/langchain/tool/tavily/tavily.json +54 -0
- data/lib/langchain/tool/tavily/tavily.rb +62 -0
- data/lib/langchain/vectorsearch/chroma.rb +3 -3
- data/lib/langchain/vectorsearch/elasticsearch.rb +2 -2
- data/lib/langchain/vectorsearch/epsilla.rb +2 -2
- data/lib/langchain/vectorsearch/hnswlib.rb +2 -2
- data/lib/langchain/vectorsearch/milvus.rb +2 -2
- data/lib/langchain/vectorsearch/pgvector.rb +2 -2
- data/lib/langchain/vectorsearch/pinecone.rb +2 -2
- data/lib/langchain/vectorsearch/qdrant.rb +2 -2
- data/lib/langchain/vectorsearch/weaviate.rb +2 -2
- data/lib/langchain/version.rb +1 -1
- data/lib/langchain.rb +1 -0
- metadata +29 -41
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 7877086b6c4d0bba6c1fc4cafc156ff476ad83eb30df1a39b279885b224bf35d
+  data.tar.gz: 3a22b060896725308c5ce137ee5617a44a75355cfc7878ce6080e6b200722c51
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 05606b99693c0e81f3785a027e155205a3ffce8f4f236868395de837e2bc6f71661c39f6a2cc062e3c0a57a6c8295e13b6910df296a9092f2f7d0596e1c969b0
+  data.tar.gz: 00d478f82be9984a95a1d11676982dec79dea2a6c0bf92ceb1ab0e3309edac2ecee115a3dd46ca060f040e44d9b7c8623ffd27ba1d5fcd8182832f933eaf2815
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,15 @@
 ## [Unreleased]
 
+## [0.13.4] - 2024-06-16
+- Fix Chroma#remove_texts() method
+- Fix NewsRetriever Tool returning non UTF-8 characters
+- Misc fixes and improvements
+
+## [0.13.3] - 2024-06-03
+- New 🛠️ `Langchain::Tool::Tavily` to execute search (better than the GoogleSearch tool)
+- Remove `activesupport` dependency
+- Misc fixes and improvements
+
 ## [0.13.2] - 2024-05-20
 - New `Langchain::LLM::GoogleGemini#embed()` method
 - `Langchain::Assistant` works with `Langchain::LLM::Anthropic` llm
data/README.md
CHANGED
@@ -57,15 +57,16 @@ Langchain.rb wraps supported LLMs in a unified interface allowing you to easily
 #### Supported LLMs and features:
 | LLM providers | `embed()` | `complete()` | `chat()` | `summarize()` | Notes |
 | -------- |:------------------:| :-------: | :-----------------: | :-------: | :----------------- |
-| [OpenAI](https://openai.com/?utm_source=langchainrb&utm_medium=github) | ✅ | ✅ | ✅ |
+| [OpenAI](https://openai.com/?utm_source=langchainrb&utm_medium=github) | ✅ | ✅ | ✅ | ✅ | Including Azure OpenAI |
 | [AI21](https://ai21.com/?utm_source=langchainrb&utm_medium=github) | ❌ | ✅ | ❌ | ✅ | |
 | [Anthropic](https://anthropic.com/?utm_source=langchainrb&utm_medium=github) | ❌ | ✅ | ✅ | ❌ | |
-| [
+| [AwsBedrock](https://aws.amazon.com/bedrock?utm_source=langchainrb&utm_medium=github) | ✅ | ✅ | ✅ | ❌ | Provides AWS, Cohere, AI21, Antropic and Stability AI models |
 | [Cohere](https://cohere.com/?utm_source=langchainrb&utm_medium=github) | ✅ | ✅ | ✅ | ✅ | |
 | [GooglePalm](https://ai.google/discover/palm2?utm_source=langchainrb&utm_medium=github) | ✅ | ✅ | ✅ | ✅ | |
-| [
+| [GoogleVertexAI](https://cloud.google.com/vertex-ai?utm_source=langchainrb&utm_medium=github) | ✅ | ❌ | ✅ | ❌ | Requires Google Cloud service auth |
+| [GoogleGemini](https://cloud.google.com/vertex-ai?utm_source=langchainrb&utm_medium=github) | ✅ | ❌ | ✅ | ❌ | Requires Gemini API Key (Limited to US) |
 | [HuggingFace](https://huggingface.co/?utm_source=langchainrb&utm_medium=github) | ✅ | ❌ | ❌ | ❌ | |
-| [
+| [MistralAI](https://mistral.ai/?utm_source=langchainrb&utm_medium=github) | ✅ | ❌ | ✅ | ❌ | |
 | [Ollama](https://ollama.ai/?utm_source=langchainrb&utm_medium=github) | ✅ | ✅ | ✅ | ✅ | |
 | [Replicate](https://replicate.com/?utm_source=langchainrb&utm_medium=github) | ✅ | ✅ | ✅ | ✅ | |
@@ -413,12 +414,14 @@ Assistants are Agent-like objects that leverage helpful instructions, LLMs, tool
 | "ruby_code_interpreter" | Interprets Ruby expressions | | `gem "safe_ruby", "~> 1.0.4"` |
 | "google_search" | A wrapper around Google Search | `ENV["SERPAPI_API_KEY"]` (https://serpapi.com/manage-api-key) | `gem "google_search_results", "~> 2.0.0"` |
 | "news_retriever" | A wrapper around NewsApi.org | `ENV["NEWS_API_KEY"]` (https://newsapi.org/) | |
+| "tavily" | A wrapper around Tavily AI | `ENV["TAVILY_API_KEY"]` (https://tavily.com/) | |
 | "weather" | Calls Open Weather API to retrieve the current weather | `ENV["OPEN_WEATHER_API_KEY"]` (https://home.openweathermap.org/api_keys) | `gem "open-weather-ruby-client", "~> 0.3.0"` |
 | "wikipedia" | Calls Wikipedia API to retrieve the summary | | `gem "wikipedia-client", "~> 1.17.0"` |
 
 ### Demos
 1. [Building an AI Assistant that operates a simulated E-commerce Store](https://www.loom.com/share/83aa4fd8dccb492aad4ca95da40ed0b2)
 2. [New Langchain.rb Assistants interface](https://www.loom.com/share/e883a4a49b8746c1b0acf9d58cf6da36)
+3. [Langchain.rb Assistant demo with NewsRetriever and function calling on Gemini](https://youtu.be/-ieyahrpDpM&t=1477s) - [code](https://github.com/palladius/gemini-news-crawler)
 
 ### Creating an Assistant
 1. Instantiate an LLM of your choice
data/lib/langchain/assistants/assistant.rb
CHANGED
@@ -2,8 +2,19 @@
 
 module Langchain
   # Assistants are Agent-like objects that leverage helpful instructions, LLMs, tools and knowledge to respond to user queries.
-  # Assistants can be configured with an LLM of your choice
+  # Assistants can be configured with an LLM of your choice, any vector search database and easily extended with additional tools.
+  #
+  # Usage:
+  #     llm = Langchain::LLM::GoogleGemini.new(api_key: ENV["GOOGLE_GEMINI_API_KEY"])
+  #     assistant = Langchain::Assistant.new(
+  #       llm: llm,
+  #       instructions: "You're a News Reporter AI",
+  #       tools: [Langchain::Tool::NewsRetriever.new(api_key: ENV["NEWS_API_KEY"])]
+  #     )
   class Assistant
+    extend Forwardable
+    def_delegators :thread, :messages, :messages=
+
     attr_reader :llm, :thread, :instructions
     attr_accessor :tools
 
@@ -22,21 +33,22 @@ module Langchain
     # @param instructions [String] The system instructions to include in the thread
     def initialize(
       llm:,
-      thread
+      thread: nil,
       tools: [],
       instructions: nil
     )
       unless SUPPORTED_LLMS.include?(llm.class)
         raise ArgumentError, "Invalid LLM; currently only #{SUPPORTED_LLMS.join(", ")} are supported"
       end
-      raise ArgumentError, "Thread must be an instance of Langchain::Thread" unless thread.is_a?(Langchain::Thread)
       raise ArgumentError, "Tools must be an array of Langchain::Tool::Base instance(s)" unless tools.is_a?(Array) && tools.all? { |tool| tool.is_a?(Langchain::Tool::Base) }
 
       @llm = llm
-      @thread = thread
+      @thread = thread || Langchain::Thread.new
       @tools = tools
       @instructions = instructions
 
+      raise ArgumentError, "Thread must be an instance of Langchain::Thread" unless @thread.is_a?(Langchain::Thread)
+
       # The first message in the thread should be the system instructions
       # TODO: What if the user added old messages and the system instructions are already in there? Should this overwrite the existing instructions?
       if llm.is_a?(Langchain::LLM::OpenAI)
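With this change `thread:` becomes optional: when no thread is passed, the Assistant builds a `Langchain::Thread` itself, and `messages`/`messages=` are delegated to it. A minimal usage sketch of that behavior (the OpenAI key and instructions string are placeholders):

    llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])

    assistant = Langchain::Assistant.new(
      llm: llm,
      instructions: "You're a helpful assistant"
      # no thread: argument; @thread defaults to Langchain::Thread.new
    )

    assistant.messages  # delegated to the underlying thread via def_delegators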
data/lib/langchain/contextual_logger.rb
CHANGED
@@ -35,8 +35,8 @@ module Langchain
     @logger.respond_to?(method, include_private)
   end
 
-  def method_missing(method, *args, **kwargs, &
-    return @logger.send(method, *args, **kwargs, &
+  def method_missing(method, *args, **kwargs, &)
+    return @logger.send(method, *args, **kwargs, &) unless @levels.include?(method)
 
     for_class = kwargs.delete(:for)
     for_class_name = for_class&.name
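The bare `&` in the new signatures here (and in the vectorsearch `ask` methods further down) is Ruby 3.1+ anonymous block forwarding. A standalone sketch of the same pattern, independent of the gem's API:

    require "logger"

    # `&` accepts a block without naming it and forwards it unchanged to another call.
    def log_with_prefix(level, *args, **kwargs, &)
      print "[langchain] "
      Logger.new($stdout).send(level, *args, **kwargs, &)
    end

    log_with_prefix(:info) { "hello" }  # the block is forwarded to Logger#info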
data/lib/langchain/llm/aws_bedrock.rb
CHANGED
@@ -135,22 +135,43 @@ module Langchain::LLM
     # @option params [Float] :temperature The temperature to use for completion
     # @option params [Float] :top_p Use nucleus sampling.
     # @option params [Integer] :top_k Only sample from the top K options for each subsequent token
-    # @
-
+    # @yield [Hash] Provides chunks of the response as they are received
+    # @return [Langchain::LLM::AnthropicResponse] Response object
+    def chat(params = {}, &block)
       parameters = chat_parameters.to_params(params)
 
       raise ArgumentError.new("messages argument is required") if Array(parameters[:messages]).empty?
 
       raise "Model #{parameters[:model]} does not support chat completions." unless Langchain::LLM::AwsBedrock::SUPPORTED_CHAT_COMPLETION_PROVIDERS.include?(completion_provider)
 
-
-
-        body: parameters.except(:model).to_json,
-        content_type: "application/json",
-        accept: "application/json"
-      })
+      if block
+        response_chunks = []
 
-
+        client.invoke_model_with_response_stream(
+          model_id: parameters[:model],
+          body: parameters.except(:model).to_json,
+          content_type: "application/json",
+          accept: "application/json"
+        ) do |stream|
+          stream.on_event do |event|
+            chunk = JSON.parse(event.bytes)
+            response_chunks << chunk
+
+            yield chunk
+          end
+        end
+
+        response_from_chunks(response_chunks)
+      else
+        response = client.invoke_model({
+          model_id: parameters[:model],
+          body: parameters.except(:model).to_json,
+          content_type: "application/json",
+          accept: "application/json"
+        })
+
+        parse_response response
+      end
     end
 
     private
@@ -260,5 +281,37 @@ module Langchain::LLM
         }
       }
     end
+
+    def response_from_chunks(chunks)
+      raw_response = {}
+
+      chunks.group_by { |chunk| chunk["type"] }.each do |type, chunks|
+        case type
+        when "message_start"
+          raw_response = chunks.first["message"]
+        when "content_block_start"
+          raw_response["content"] = chunks.map { |chunk| chunk["content_block"] }
+        when "content_block_delta"
+          chunks.group_by { |chunk| chunk["index"] }.each do |index, deltas|
+            deltas.group_by { |delta| delta.dig("delta", "type") }.each do |type, deltas|
+              case type
+              when "text_delta"
+                raw_response["content"][index]["text"] = deltas.map { |delta| delta.dig("delta", "text") }.join
+              when "input_json_delta"
+                json_string = deltas.map { |delta| delta.dig("delta", "partial_json") }.join
+                raw_response["content"][index]["input"] = json_string.empty? ? {} : JSON.parse(json_string)
+              end
+            end
+          end
+        when "message_delta"
+          chunks.each do |chunk|
+            raw_response = raw_response.merge(chunk["delta"])
+            raw_response["usage"] = raw_response["usage"].merge(chunk["usage"]) if chunk["usage"]
+          end
+        end
+      end
+
+      Langchain::LLM::AnthropicResponse.new(raw_response)
+    end
   end
 end
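A hedged sketch of the new streaming path: when `chat` is given a block, `invoke_model_with_response_stream` is used, each parsed event hash is yielded, and the collected chunks are folded back into an `AnthropicResponse` by `response_from_chunks`. The model id and prompt below are placeholders, and AWS credentials are assumed to be configured in the environment:

    llm = Langchain::LLM::AwsBedrock.new

    response = llm.chat(
      messages: [{role: "user", content: "Tell me a short joke"}],
      model: "anthropic.claude-3-sonnet-20240229-v1:0"  # placeholder model id
    ) do |chunk|
      # Each streamed event arrives as a parsed Hash, e.g.
      # {"type" => "content_block_delta", "delta" => {"type" => "text_delta", "text" => "..."}}
      print chunk.dig("delta", "text")
    end

    response.chat_completion  # full text reassembled from the streamed chunks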
data/lib/langchain/llm/ollama.rb
CHANGED
@@ -65,8 +65,14 @@ module Langchain::LLM
     # @param model [String] The model to use
     #   For a list of valid parameters and values, see:
     #   https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values
+    # @option block [Proc] Receive the intermediate responses as a stream of +OllamaResponse+ objects.
     # @return [Langchain::LLM::OllamaResponse] Response object
     #
+    # Example:
+    #
+    #   final_resp = ollama.complete(prompt:) { |resp| print resp.completion }
+    #   final_resp.total_tokens
+    #
     def complete(
       prompt:,
       model: defaults[:completion_model_name],
@@ -75,7 +81,6 @@ module Langchain::LLM
       system: nil,
       template: nil,
       context: nil,
-      stream: nil,
       raw: nil,
       mirostat: nil,
       mirostat_eta: nil,
@@ -108,7 +113,7 @@ module Langchain::LLM
         system: system,
         template: template,
         context: context,
-        stream:
+        stream: block.present?,
         raw: raw
       }.compact
 
@@ -132,53 +137,54 @@ module Langchain::LLM
       }
 
       parameters[:options] = llm_parameters.compact
+      responses_stream = []
 
-
-
-
-        req.body = parameters
+      client.post("api/generate", parameters) do |req|
+        req.options.on_data = json_responses_chunk_handler do |parsed_chunk|
+          responses_stream << parsed_chunk
 
-
-          chunk.split("\n").each do |line_chunk|
-            json_chunk = begin
-              JSON.parse(line_chunk)
-            # In some instance the chunk exceeds the buffer size and the JSON parser fails
-            rescue JSON::ParserError
-              nil
-            end
-
-            response += json_chunk.dig("response") unless json_chunk.blank?
-          end
-
-          yield json_chunk, size if block
+          block&.call(OllamaResponse.new(parsed_chunk, model: parameters[:model]))
         end
       end
 
-
+      generate_final_completion_response(responses_stream, parameters)
     end
 
     # Generate a chat completion
     #
-    # @param [
-    # @
+    # @param messages [Array] The chat messages
+    # @param model [String] The model to use
+    # @param params [Hash] Unified chat parmeters from [Langchain::LLM::Parameters::Chat::SCHEMA]
     # @option params [Array<Hash>] :messages Array of messages
+    # @option params [String] :model Model name
     # @option params [String] :format Format to return a response in. Currently the only accepted value is `json`
     # @option params [Float] :temperature The temperature to use
     # @option params [String] :template The prompt template to use (overrides what is defined in the `Modelfile`)
-    # @option
+    # @option block [Proc] Receive the intermediate responses as a stream of +OllamaResponse+ objects.
+    # @return [Langchain::LLM::OllamaResponse] Response object
+    #
+    # Example:
+    #
+    #   final_resp = ollama.chat(messages:) { |resp| print resp.chat_completion }
+    #   final_resp.total_tokens
     #
     # The message object has the following fields:
     #   role: the role of the message, either system, user or assistant
     #   content: the content of the message
     #   images (optional): a list of images to include in the message (for multimodal models such as llava)
-    def chat(params
-      parameters = chat_parameters.to_params(params)
+    def chat(messages:, model: nil, **params, &block)
+      parameters = chat_parameters.to_params(params.merge(messages:, model:, stream: block.present?))
+      responses_stream = []
 
-
-      req.
+      client.post("api/chat", parameters) do |req|
+        req.options.on_data = json_responses_chunk_handler do |parsed_chunk|
+          responses_stream << parsed_chunk
+
+          block&.call(OllamaResponse.new(parsed_chunk, model: parameters[:model]))
+        end
      end
 
-
+      generate_final_chat_completion_response(responses_stream, parameters)
    end
 
     #
@@ -239,7 +245,7 @@ module Langchain::LLM
       req.body = parameters
     end
 
-
+    OllamaResponse.new(response.body, model: parameters[:model])
   end
 
   # Generate a summary for a given text
@@ -257,7 +263,6 @@ module Langchain::LLM
 
   private
 
-  # @return [Faraday::Connection] Faraday client
   def client
     @client ||= Faraday.new(url: url) do |conn|
       conn.request :json
@@ -265,5 +270,33 @@ module Langchain::LLM
       conn.response :raise_error
     end
   end
+
+  def json_responses_chunk_handler(&block)
+    proc do |chunk, _size|
+      chunk.split("\n").each do |chunk_line|
+        parsed_chunk = JSON.parse(chunk_line)
+        block.call(parsed_chunk)
+      end
+    end
+  end
+
+  def generate_final_completion_response(responses_stream, parameters)
+    final_response = responses_stream.last.merge(
+      "response" => responses_stream.map { |resp| resp["response"] }.join
+    )
+
+    OllamaResponse.new(final_response, model: parameters[:model])
+  end
+
+  def generate_final_chat_completion_response(responses_stream, parameters)
+    final_response = responses_stream.last.merge(
+      "message" => {
+        "role" => "assistant",
+        "content" => responses_stream.map { |resp| resp.dig("message", "content") }.join
+      }
+    )
+
+    OllamaResponse.new(final_response, model: parameters[:model])
+  end
 end
 end
data/lib/langchain/llm/openai.rb
CHANGED
@@ -26,8 +26,6 @@ module Langchain::LLM
     "text-embedding-3-small" => 1536
   }.freeze
 
-  LENGTH_VALIDATOR = Langchain::Utils::TokenLength::OpenAIValidator
-
   attr_reader :defaults
 
   # Initialize an OpenAI LLM instance
@@ -82,8 +80,6 @@ module Langchain::LLM
       parameters[:dimensions] = EMBEDDING_SIZES[model]
     end
 
-    validate_max_tokens(text, parameters[:model])
-
     response = with_api_error_handling do
       client.embeddings(parameters: parameters)
     end
@@ -177,10 +173,6 @@ module Langchain::LLM
     response
   end
 
-  def validate_max_tokens(messages, model, max_tokens = nil)
-    LENGTH_VALIDATOR.validate_max_tokens!(messages, model, max_tokens: max_tokens, llm: self)
-  end
-
   def response_from_chunks
     grouped_chunks = @response_chunks.group_by { |chunk| chunk.dig("choices", 0, "index") }
     final_choices = grouped_chunks.map do |index, chunks|
@@ -188,12 +180,31 @@ module Langchain::LLM
         "index" => index,
         "message" => {
           "role" => "assistant",
-          "content" => chunks.map { |chunk| chunk.dig("choices", 0, "delta", "content") }.join
-
+          "content" => chunks.map { |chunk| chunk.dig("choices", 0, "delta", "content") }.join,
+          "tool_calls" => tool_calls_from_choice_chunks(chunks)
+        }.compact,
         "finish_reason" => chunks.last.dig("choices", 0, "finish_reason")
       }
     end
     @response_chunks.first&.slice("id", "object", "created", "model")&.merge({"choices" => final_choices})
   end
+
+  def tool_calls_from_choice_chunks(choice_chunks)
+    tool_call_chunks = choice_chunks.select { |chunk| chunk.dig("choices", 0, "delta", "tool_calls") }
+    return nil if tool_call_chunks.empty?
+
+    tool_call_chunks.group_by { |chunk| chunk.dig("choices", 0, "delta", "tool_calls", 0, "index") }.map do |index, chunks|
+      first_chunk = chunks.first
+
+      {
+        "id" => first_chunk.dig("choices", 0, "delta", "tool_calls", 0, "id"),
+        "type" => first_chunk.dig("choices", 0, "delta", "tool_calls", 0, "type"),
+        "function" => {
+          "name" => first_chunk.dig("choices", 0, "delta", "tool_calls", 0, "function", "name"),
+          "arguments" => chunks.map { |chunk| chunk.dig("choices", 0, "delta", "tool_calls", 0, "function", "arguments") }.join
+        }
+      }
+    end
+  end
 end
 end
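The regrouping above is easier to see on data; a hypothetical illustration of two streamed deltas for one tool call being merged (chunk hashes are abbreviated to the keys the code digs into, and the ids and arguments are invented):

    chunks = [
      {"choices" => [{"delta" => {"tool_calls" => [{
        "index" => 0, "id" => "call_1", "type" => "function",
        "function" => {"name" => "tavily__search", "arguments" => "{\"query\":"}}]}}]},
      {"choices" => [{"delta" => {"tool_calls" => [{
        "index" => 0, "function" => {"arguments" => "\"ruby\"}"}}]}}]}
    ]

    # tool_calls_from_choice_chunks(chunks) would return:
    # [{"id" => "call_1", "type" => "function",
    #   "function" => {"name" => "tavily__search", "arguments" => "{\"query\":\"ruby\"}"}}]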
data/lib/langchain/llm/response/ollama_response.rb
CHANGED
@@ -8,9 +8,7 @@ module Langchain::LLM
     end
 
     def created_at
-      if raw_response.dig("created_at")
-        Time.parse(raw_response.dig("created_at"))
-      end
+      Time.parse(raw_response.dig("created_at")) if raw_response.dig("created_at")
     end
 
     def chat_completion
@@ -18,11 +16,11 @@ module Langchain::LLM
     end
 
     def completion
-
+      raw_response.dig("response")
     end
 
     def completions
-
+      [completion].compact
     end
 
     def embedding
@@ -38,15 +36,21 @@ module Langchain::LLM
     end
 
     def prompt_tokens
-      raw_response.dig("prompt_eval_count")
+      raw_response.dig("prompt_eval_count") if done?
     end
 
     def completion_tokens
-      raw_response.dig("eval_count")
+      raw_response.dig("eval_count") if done?
     end
 
     def total_tokens
-      prompt_tokens + completion_tokens
+      prompt_tokens + completion_tokens if done?
+    end
+
+    private
+
+    def done?
+      !!raw_response["done"]
     end
   end
 end
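A brief sketch of why the token helpers are now gated on `done?`: intermediate streamed chunks carry no eval counts, so only the final response reports them (the hash values below are illustrative):

    streaming_chunk = Langchain::LLM::OllamaResponse.new(
      {"done" => false, "message" => {"content" => "Hel"}}, model: "llama3"
    )
    final_chunk = Langchain::LLM::OllamaResponse.new(
      {"done" => true, "message" => {"content" => "Hello"}, "prompt_eval_count" => 5, "eval_count" => 2}, model: "llama3"
    )

    streaming_chunk.total_tokens  # => nil (not done yet)
    final_chunk.total_tokens      # => 7  (5 prompt + 2 completion tokens)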
data/lib/langchain/loader.rb
CHANGED
@@ -90,7 +90,7 @@ module Langchain
   private
 
   def load_from_url
-    URI.parse(@path).open
+    URI.parse(URI::DEFAULT_PARSER.escape(@path)).open
   end
 
   def load_from_path
@@ -105,7 +105,7 @@ module Langchain
       # Only load and add to result files with supported extensions
       Langchain::Loader.new(file, @options).load(&block)
     rescue
-      UnknownFormatError
+      UnknownFormatError.new("Unknown format: #{source_type}")
     end.flatten.compact
   end
   # rubocop:enable Style/ArgumentsForwarding
@@ -123,7 +123,7 @@ module Langchain
   end
 
   def processor_klass
-    raise UnknownFormatError unless (kind = find_processor)
+    raise UnknownFormatError.new("Unknown format: #{source_type}") unless (kind = find_processor)
 
     Langchain::Processors.const_get(kind)
   end
data/lib/langchain/tool/news_retriever/news_retriever.rb
CHANGED
@@ -126,7 +126,10 @@ module Langchain::Tool
       request["Content-Type"] = "application/json"
 
       response = http.request(request)
-      response
+      response
+        .body
+        # Remove non-UTF-8 characters
+        .force_encoding(Encoding::UTF_8)
     end
   end
 end
data/lib/langchain/tool/tavily/tavily.json
ADDED
@@ -0,0 +1,54 @@
+[
+  {
+    "type": "function",
+    "function": {
+      "name": "tavily__search",
+      "description": "Tavily Tool: Robust search API",
+      "parameters": {
+        "type": "object",
+        "properties": {
+          "query": {
+            "type": "string",
+            "description": "The search query string"
+          },
+          "search_depth": {
+            "type": "string",
+            "description": "The depth of the search: basic for quick results and advanced for indepth high quality results but longer response time",
+            "enum": ["basic", "advanced"]
+          },
+          "include_images": {
+            "type": "boolean",
+            "description": "Include a list of query related images in the response"
+          },
+          "include_answer": {
+            "type": "boolean",
+            "description": "Include answers in the search results"
+          },
+          "include_raw_content": {
+            "type": "boolean",
+            "description": "Include raw content in the search results"
+          },
+          "max_results": {
+            "type": "integer",
+            "description": "The number of maximum search results to return"
+          },
+          "include_domains": {
+            "type": "array",
+            "items": {
+              "type": "string"
+            },
+            "description": "A list of domains to specifically include in the search results"
+          },
+          "exclude_domains": {
+            "type": "array",
+            "items": {
+              "type": "string"
+            },
+            "description": "A list of domains to specifically exclude from the search results"
+          }
+        },
+        "required": ["query"]
+      }
+    }
+  }
+]
data/lib/langchain/tool/tavily/tavily.rb
ADDED
@@ -0,0 +1,62 @@
+# frozen_string_literal: true
+
+module Langchain::Tool
+  class Tavily < Base
+    #
+    # Tavily Search is a robust search API tailored specifically for LLM Agents.
+    # It seamlessly integrates with diverse data sources to ensure a superior, relevant search experience.
+    #
+    # Usage:
+    #    tavily = Langchain::Tool::Tavily.new(api_key: ENV["TAVILY_API_KEY"])
+    #
+    NAME = "tavily"
+    ANNOTATIONS_PATH = Langchain.root.join("./langchain/tool/#{NAME}/#{NAME}.json").to_path
+
+    def initialize(api_key:)
+      @api_key = api_key
+    end
+
+    # Search for data based on a query.
+    #
+    # @param query [String] The search query string.
+    # @param search_depth [String] The depth of the search. It can be basic or advanced. Default is basic for quick results and advanced for indepth high quality results but longer response time. Advanced calls equals 2 requests.
+    # @param include_images [Boolean] Include a list of query related images in the response. Default is False.
+    # @param include_answer [Boolean] Include answers in the search results. Default is False.
+    # @param include_raw_content [Boolean] Include raw content in the search results. Default is False.
+    # @param max_results [Integer] The number of maximum search results to return. Default is 5.
+    # @param include_domains [Array<String>] A list of domains to specifically include in the search results. Default is None, which includes all domains.
+    # @param exclude_domains [Array<String>] A list of domains to specifically exclude from the search results. Default is None, which doesn't exclude any domains.
+    #
+    # @return [String] The search results in JSON format.
+    def search(
+      query:,
+      search_depth: "basic",
+      include_images: false,
+      include_answer: false,
+      include_raw_content: false,
+      max_results: 5,
+      include_domains: [],
+      exclude_domains: []
+    )
+      uri = URI("https://api.tavily.com/search")
+      request = Net::HTTP::Post.new(uri)
+      request.content_type = "application/json"
+      request.body = {
+        api_key: @api_key,
+        query: query,
+        search_depth: search_depth,
+        include_images: include_images,
+        include_answer: include_answer,
+        include_raw_content: include_raw_content,
+        max_results: max_results,
+        include_domains: include_domains,
+        exclude_domains: exclude_domains
+      }.to_json
+
+      response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: uri.scheme == "https") do |http|
+        http.request(request)
+      end
+      response.body
+    end
+  end
+end
data/lib/langchain/vectorsearch/chroma.rb
CHANGED
@@ -64,7 +64,7 @@ module Langchain::Vectorsearch
     # @param ids [Array<String>] The list of ids to remove
     # @return [Hash] The response from the server
     def remove_texts(ids:)
-      collection.delete(ids)
+      collection.delete(ids: ids)
     end
 
     # Create the collection with the default schema
@@ -122,7 +122,7 @@ module Langchain::Vectorsearch
     # @param k [Integer] The number of results to have in context
     # @yield [String] Stream responses back one String at a time
     # @return [String] The answer to the question
-    def ask(question:, k: 4, &
+    def ask(question:, k: 4, &)
       search_results = similarity_search(query: question, k: k)
 
       context = search_results.map do |result|
@@ -134,7 +134,7 @@ module Langchain::Vectorsearch
       prompt = generate_rag_prompt(question: question, context: context)
 
       messages = [{role: "user", content: prompt}]
-      response = llm.chat(messages: messages, &
+      response = llm.chat(messages: messages, &)
 
       response.context = context
       response
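The one-line `remove_texts` fix matters because, as the change implies, the underlying collection's `delete` expects `ids:` as a keyword rather than a positional argument. A hedged sketch of the now-working call (connection options and the id are placeholders):

    chroma = Langchain::Vectorsearch::Chroma.new(
      url: ENV["CHROMA_URL"],
      index_name: "documents",
      llm: Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
    )

    chroma.add_texts(texts: ["Hello world"])
    chroma.remove_texts(ids: ["doc-1"])  # ids now forwarded as a keyword to collection.delete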
data/lib/langchain/vectorsearch/elasticsearch.rb
CHANGED
@@ -143,7 +143,7 @@ module Langchain::Vectorsearch
     # @param k [Integer] The number of results to have in context
     # @yield [String] Stream responses back one String at a time
     # @return [String] The answer to the question
-    def ask(question:, k: 4, &
+    def ask(question:, k: 4, &)
       search_results = similarity_search(query: question, k: k)
 
       context = search_results.map do |result|
@@ -153,7 +153,7 @@ module Langchain::Vectorsearch
       prompt = generate_rag_prompt(question: question, context: context)
 
       messages = [{role: "user", content: prompt}]
-      response = llm.chat(messages: messages, &
+      response = llm.chat(messages: messages, &)
 
       response.context = context
       response
data/lib/langchain/vectorsearch/epsilla.rb
CHANGED
@@ -129,7 +129,7 @@ module Langchain::Vectorsearch
     # @param k [Integer] The number of results to have in context
     # @yield [String] Stream responses back one String at a time
     # @return [String] The answer to the question
-    def ask(question:, k: 4, &
+    def ask(question:, k: 4, &)
       search_results = similarity_search(query: question, k: k)
 
       context = search_results.map do |result|
@@ -140,7 +140,7 @@ module Langchain::Vectorsearch
       prompt = generate_rag_prompt(question: question, context: context)
 
       messages = [{role: "user", content: prompt}]
-      response = llm.chat(messages: messages, &
+      response = llm.chat(messages: messages, &)
 
       response.context = context
       response
data/lib/langchain/vectorsearch/hnswlib.rb
CHANGED
@@ -58,7 +58,7 @@ module Langchain::Vectorsearch
     #
     # @param query [String] The text to search for
     # @param k [Integer] The number of results to return
-    # @return [Array] Results in the format `[[id1,
+    # @return [Array] Results in the format `[[id1, id2], [distance1, distance2]]`
     #
     def similarity_search(
       query:,
@@ -77,7 +77,7 @@ module Langchain::Vectorsearch
     #
     # @param embedding [Array<Float>] The embedding to search for
     # @param k [Integer] The number of results to return
-    # @return [Array] Results in the format `[[id1,
+    # @return [Array] Results in the format `[[id1, id2], [distance1, distance2]]`
     #
     def similarity_search_by_vector(
       embedding:,
data/lib/langchain/vectorsearch/milvus.rb
CHANGED
@@ -141,7 +141,7 @@ module Langchain::Vectorsearch
     # @param k [Integer] The number of results to have in context
     # @yield [String] Stream responses back one String at a time
     # @return [String] The answer to the question
-    def ask(question:, k: 4, &
+    def ask(question:, k: 4, &)
       search_results = similarity_search(query: question, k: k)
 
       content_field = search_results.dig("results", "fields_data").select { |field| field.dig("field_name") == "content" }
@@ -152,7 +152,7 @@ module Langchain::Vectorsearch
       prompt = generate_rag_prompt(question: question, context: context)
 
       messages = [{role: "user", content: prompt}]
-      response = llm.chat(messages: messages, &
+      response = llm.chat(messages: messages, &)
 
       response.context = context
       response
data/lib/langchain/vectorsearch/pgvector.rb
CHANGED
@@ -146,7 +146,7 @@ module Langchain::Vectorsearch
     # @param k [Integer] The number of results to have in context
     # @yield [String] Stream responses back one String at a time
     # @return [String] The answer to the question
-    def ask(question:, k: 4, &
+    def ask(question:, k: 4, &)
       search_results = similarity_search(query: question, k: k)
 
       context = search_results.map do |result|
@@ -157,7 +157,7 @@ module Langchain::Vectorsearch
       prompt = generate_rag_prompt(question: question, context: context)
 
       messages = [{role: "user", content: prompt}]
-      response = llm.chat(messages: messages, &
+      response = llm.chat(messages: messages, &)
 
       response.context = context
       response
data/lib/langchain/vectorsearch/pinecone.rb
CHANGED
@@ -171,7 +171,7 @@ module Langchain::Vectorsearch
     # @param filter [String] The filter to use
     # @yield [String] Stream responses back one String at a time
     # @return [String] The answer to the question
-    def ask(question:, namespace: "", filter: nil, k: 4, &
+    def ask(question:, namespace: "", filter: nil, k: 4, &)
       search_results = similarity_search(query: question, namespace: namespace, filter: filter, k: k)
 
       context = search_results.map do |result|
@@ -182,7 +182,7 @@ module Langchain::Vectorsearch
       prompt = generate_rag_prompt(question: question, context: context)
 
       messages = [{role: "user", content: prompt}]
-      response = llm.chat(messages: messages, &
+      response = llm.chat(messages: messages, &)
 
       response.context = context
       response
data/lib/langchain/vectorsearch/qdrant.rb
CHANGED
@@ -137,7 +137,7 @@ module Langchain::Vectorsearch
     # @param k [Integer] The number of results to have in context
     # @yield [String] Stream responses back one String at a time
     # @return [String] The answer to the question
-    def ask(question:, k: 4, &
+    def ask(question:, k: 4, &)
       search_results = similarity_search(query: question, k: k)
 
       context = search_results.map do |result|
@@ -148,7 +148,7 @@ module Langchain::Vectorsearch
       prompt = generate_rag_prompt(question: question, context: context)
 
       messages = [{role: "user", content: prompt}]
-      response = llm.chat(messages: messages, &
+      response = llm.chat(messages: messages, &)
 
       response.context = context
       response
data/lib/langchain/vectorsearch/weaviate.rb
CHANGED
@@ -143,7 +143,7 @@ module Langchain::Vectorsearch
     # @param k [Integer] The number of results to have in context
     # @yield [String] Stream responses back one String at a time
     # @return [Hash] The answer
-    def ask(question:, k: 4, &
+    def ask(question:, k: 4, &)
       search_results = similarity_search(query: question, k: k)
 
       context = search_results.map do |result|
@@ -154,7 +154,7 @@ module Langchain::Vectorsearch
       prompt = generate_rag_prompt(question: question, context: context)
 
       messages = [{role: "user", content: prompt}]
-      response = llm.chat(messages: messages, &
+      response = llm.chat(messages: messages, &)
 
       response.context = context
       response
data/lib/langchain/version.rb
CHANGED
data/lib/langchain.rb
CHANGED
@@ -34,6 +34,7 @@ loader.collapse("#{__dir__}/langchain/tool/file_system")
 loader.collapse("#{__dir__}/langchain/tool/google_search")
 loader.collapse("#{__dir__}/langchain/tool/ruby_code_interpreter")
 loader.collapse("#{__dir__}/langchain/tool/news_retriever")
+loader.collapse("#{__dir__}/langchain/tool/tavily")
 loader.collapse("#{__dir__}/langchain/tool/vectorsearch")
 loader.collapse("#{__dir__}/langchain/tool/weather")
 loader.collapse("#{__dir__}/langchain/tool/wikipedia")
metadata
CHANGED
@@ -1,29 +1,15 @@
 --- !ruby/object:Gem::Specification
 name: langchainrb
 version: !ruby/object:Gem::Version
-  version: 0.13.2
+  version: 0.13.4
 platform: ruby
 authors:
 - Andrei Bondarev
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-
+date: 2024-06-16 00:00:00.000000000 Z
 dependencies:
-- !ruby/object:Gem::Dependency
-  name: activesupport
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: 7.0.8
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: 7.0.8
 - !ruby/object:Gem::Dependency
   name: baran
   requirement: !ruby/object:Gem::Requirement
@@ -52,20 +38,6 @@ dependencies:
     - - "~>"
      - !ruby/object:Gem::Version
        version: 1.1.0
-- !ruby/object:Gem::Dependency
-  name: tiktoken_ruby
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - "~>"
-      - !ruby/object:Gem::Version
-        version: 0.0.8
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - "~>"
-      - !ruby/object:Gem::Version
-        version: 0.0.8
 - !ruby/object:Gem::Dependency
   name: json-schema
   requirement: !ruby/object:Gem::Requirement
@@ -346,6 +318,20 @@ dependencies:
    - - "~>"
      - !ruby/object:Gem::Version
        version: 1.6.5
+- !ruby/object:Gem::Dependency
+  name: faraday
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: googleauth
   requirement: !ruby/object:Gem::Requirement
@@ -683,33 +669,33 @@ dependencies:
   - !ruby/object:Gem::Version
     version: 1.17.0
 - !ruby/object:Gem::Dependency
-  name:
+  name: power_point_pptx
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - "~>"
      - !ruby/object:Gem::Version
-        version:
+        version: 0.1.0
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - "~>"
      - !ruby/object:Gem::Version
-        version:
+        version: 0.1.0
 - !ruby/object:Gem::Dependency
-  name:
+  name: tiktoken_ruby
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
      - !ruby/object:Gem::Version
-        version: 0.
+        version: 0.0.9
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
      - !ruby/object:Gem::Version
-        version: 0.
+        version: 0.0.9
 description: Build LLM-backed Ruby applications with Ruby's Langchain.rb
 email:
 - andrei.bondarev13@gmail.com
@@ -814,6 +800,8 @@ files:
 - lib/langchain/tool/news_retriever/news_retriever.rb
 - lib/langchain/tool/ruby_code_interpreter/ruby_code_interpreter.json
 - lib/langchain/tool/ruby_code_interpreter/ruby_code_interpreter.rb
+- lib/langchain/tool/tavily/tavily.json
+- lib/langchain/tool/tavily/tavily.rb
 - lib/langchain/tool/vectorsearch/vectorsearch.json
 - lib/langchain/tool/vectorsearch/vectorsearch.rb
 - lib/langchain/tool/weather/weather.json
@@ -846,8 +834,8 @@ licenses:
 - MIT
 metadata:
   homepage_uri: https://rubygems.org/gems/langchainrb
-  source_code_uri: https://github.com/
-  changelog_uri: https://github.com/
+  source_code_uri: https://github.com/patterns-ai-core/langchainrb
+  changelog_uri: https://github.com/patterns-ai-core/langchainrb/blob/main/CHANGELOG.md
   documentation_uri: https://rubydoc.info/gems/langchainrb
 post_install_message:
 rdoc_options: []
@@ -864,7 +852,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.5.
+rubygems_version: 3.5.11
 signing_key:
 specification_version: 4
 summary: Build LLM-backed Ruby applications with Ruby's Langchain.rb