langchainrb 0.13.3 → 0.13.4
- checksums.yaml +4 -4
- data/CHANGELOG.md +5 -0
- data/README.md +1 -1
- data/lib/langchain/contextual_logger.rb +2 -2
- data/lib/langchain/loader.rb +3 -3
- data/lib/langchain/tool/news_retriever/news_retriever.rb +4 -1
- data/lib/langchain/vectorsearch/chroma.rb +3 -3
- data/lib/langchain/vectorsearch/elasticsearch.rb +2 -2
- data/lib/langchain/vectorsearch/epsilla.rb +2 -2
- data/lib/langchain/vectorsearch/hnswlib.rb +2 -2
- data/lib/langchain/vectorsearch/milvus.rb +2 -2
- data/lib/langchain/vectorsearch/pgvector.rb +2 -2
- data/lib/langchain/vectorsearch/pinecone.rb +2 -2
- data/lib/langchain/vectorsearch/qdrant.rb +2 -2
- data/lib/langchain/vectorsearch/weaviate.rb +2 -2
- data/lib/langchain/version.rb +1 -1
- metadata +4 -4
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 7877086b6c4d0bba6c1fc4cafc156ff476ad83eb30df1a39b279885b224bf35d
+  data.tar.gz: 3a22b060896725308c5ce137ee5617a44a75355cfc7878ce6080e6b200722c51
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 05606b99693c0e81f3785a027e155205a3ffce8f4f236868395de837e2bc6f71661c39f6a2cc062e3c0a57a6c8295e13b6910df296a9092f2f7d0596e1c969b0
+  data.tar.gz: 00d478f82be9984a95a1d11676982dec79dea2a6c0bf92ceb1ab0e3309edac2ecee115a3dd46ca060f040e44d9b7c8623ffd27ba1d5fcd8182832f933eaf2815
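These digests cover the two payload files inside the .gem archive (metadata.gz and data.tar.gz), not the .gem file itself. A minimal Ruby sketch of recomputing them, assuming the gem has been unpacked first (a .gem is a tar archive, so e.g. `tar -xf langchainrb-0.13.4.gem`):

```ruby
require "digest"

# Recompute the digests of the gem's data payload and compare them with the
# data.tar.gz entries in checksums.yaml above.
puts Digest::SHA256.file("data.tar.gz").hexdigest
puts Digest::SHA512.file("data.tar.gz").hexdigest
```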
data/CHANGELOG.md CHANGED
@@ -1,5 +1,10 @@
 ## [Unreleased]
 
+## [0.13.4] - 2024-06-16
+- Fix Chroma#remove_texts() method
+- Fix NewsRetriever Tool returning non UTF-8 characters
+- Misc fixes and improvements
+
 ## [0.13.3] - 2024-06-03
 - New 🛠️ `Langchain::Tool::Tavily` to execute search (better than the GoogleSearch tool)
 - Remove `activesupport` dependency
data/README.md CHANGED
@@ -57,7 +57,7 @@ Langchain.rb wraps supported LLMs in a unified interface allowing you to easily
 #### Supported LLMs and features:
 | LLM providers | `embed()` | `complete()` | `chat()` | `summarize()` | Notes |
 | -------- |:------------------:| :-------: | :-----------------: | :-------: | :----------------- |
-| [OpenAI](https://openai.com/?utm_source=langchainrb&utm_medium=github) | ✅ | ✅ | ✅ |
+| [OpenAI](https://openai.com/?utm_source=langchainrb&utm_medium=github) | ✅ | ✅ | ✅ | ✅ | Including Azure OpenAI |
 | [AI21](https://ai21.com/?utm_source=langchainrb&utm_medium=github) | ❌ | ✅ | ❌ | ✅ | |
 | [Anthropic](https://anthropic.com/?utm_source=langchainrb&utm_medium=github) | ❌ | ✅ | ✅ | ❌ | |
 | [AwsBedrock](https://aws.amazon.com/bedrock?utm_source=langchainrb&utm_medium=github) | ✅ | ✅ | ✅ | ❌ | Provides AWS, Cohere, AI21, Antropic and Stability AI models |
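The table documents the unified interface the gem wraps around each provider. A minimal sketch of the four features for the OpenAI row (API key and input texts are placeholders; response accessors follow langchainrb's response objects):

```ruby
llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])

llm.embed(text: "Hello world").embedding                            # embed()
llm.complete(prompt: "Once upon a time").completion                 # complete()
llm.chat(messages: [{role: "user", content: "Hi"}]).chat_completion # chat()
llm.summarize(text: "A long document ...")                          # summarize()
```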
data/lib/langchain/contextual_logger.rb CHANGED
@@ -35,8 +35,8 @@ module Langchain
       @logger.respond_to?(method, include_private)
     end
 
-    def method_missing(method, *args, **kwargs, &block)
-      return @logger.send(method, *args, **kwargs, &block) unless @levels.include?(method)
+    def method_missing(method, *args, **kwargs, &)
+      return @logger.send(method, *args, **kwargs, &) unless @levels.include?(method)
 
       for_class = kwargs.delete(:for)
       for_class_name = for_class&.name
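The `&block` → `&` change uses Ruby's anonymous block forwarding (Ruby 3.1+), matching the `Style/ArgumentsForwarding` rule referenced elsewhere in this release. A standalone sketch of the mechanism, separate from the gem's code:

```ruby
# `&` in a parameter list captures the block without naming it; `&` at a
# call site forwards that anonymous block onward. Requires Ruby >= 3.1.
def outer(prefix, &)
  inner(prefix, &)
end

def inner(prefix, &block)
  block.call("#{prefix}: hello")
end

outer("INFO") { |line| puts line } # => prints "INFO: hello"
```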
data/lib/langchain/loader.rb CHANGED
@@ -90,7 +90,7 @@ module Langchain
     private
 
     def load_from_url
-      URI.parse(@path).open
+      URI.parse(URI::DEFAULT_PARSER.escape(@path)).open
    end
 
     def load_from_path
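`URI.parse` raises `URI::InvalidURIError` on URLs containing spaces or other characters that are illegal in a URI, so escaping first makes such URLs loadable. A standalone sketch (hypothetical URL):

```ruby
require "uri"

url = "https://example.com/my report.pdf" # hypothetical URL containing a space

# URI.parse(url)                            # => raises URI::InvalidURIError
escaped = URI::DEFAULT_PARSER.escape(url)   # => "https://example.com/my%20report.pdf"
URI.parse(escaped)                          # => #<URI::HTTPS ...>, ready for #open
```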
@@ -105,7 +105,7 @@ module Langchain
       # Only load and add to result files with supported extensions
       Langchain::Loader.new(file, @options).load(&block)
     rescue
-      UnknownFormatError
+      UnknownFormatError.new("Unknown format: #{source_type}")
     end.flatten.compact
   end
   # rubocop:enable Style/ArgumentsForwarding
@@ -123,7 +123,7 @@ module Langchain
     end
 
     def processor_klass
-      raise UnknownFormatError unless (kind = find_processor)
+      raise UnknownFormatError.new("Unknown format: #{source_type}") unless (kind = find_processor)
 
       Langchain::Processors.const_get(kind)
     end
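Both changes attach the offending format to the error instead of using a bare class, which makes failures diagnosable at the call site. A standalone sketch with a stand-in error class and a hypothetical extension:

```ruby
class UnknownFormatError < StandardError; end # stand-in for Langchain's class

begin
  raise UnknownFormatError.new("Unknown format: .xyz")
rescue UnknownFormatError => e
  puts e.message # => "Unknown format: .xyz", so the failure names the format
end
```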
data/lib/langchain/tool/news_retriever/news_retriever.rb CHANGED
@@ -126,7 +126,10 @@ module Langchain::Tool
       request["Content-Type"] = "application/json"
 
       response = http.request(request)
-      response.body
+      response
+        .body
+        # Remove non-UTF-8 characters
+        .force_encoding(Encoding::UTF_8)
     end
   end
 end
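Worth noting: `force_encoding` relabels the string's encoding without changing its bytes, so any bytes that are invalid in UTF-8 remain in place (they can be dropped with `String#scrub` if needed). A standalone sketch of the behavior:

```ruby
# "\xC3" starts a UTF-8 multi-byte sequence that "(" (0x28) cannot complete,
# so the relabeled string contains an invalid byte.
bytes = "news \xC3\x28 item".b           # binary string with a broken sequence
utf8 = bytes.force_encoding(Encoding::UTF_8)
utf8.valid_encoding?                     # => false
utf8.scrub("")                           # => "news ( item", invalid byte removed
```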
data/lib/langchain/vectorsearch/chroma.rb CHANGED
@@ -64,7 +64,7 @@ module Langchain::Vectorsearch
     # @param ids [Array<String>] The list of ids to remove
     # @return [Hash] The response from the server
     def remove_texts(ids:)
-      collection.delete(ids)
+      collection.delete(ids: ids)
     end
 
     # Create the collection with the default schema
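This is the `Chroma#remove_texts()` fix from the changelog: the underlying collection client expects `ids` as a keyword argument, as the new line shows. A minimal usage sketch (server URL, index name, and ids are placeholders; `llm` is any configured `Langchain::LLM` instance):

```ruby
chroma = Langchain::Vectorsearch::Chroma.new(
  url: "http://localhost:8000", # assumed local Chroma server
  index_name: "documents",
  llm: llm
)

chroma.remove_texts(ids: ["doc-1", "doc-2"]) # ids now forwarded as a keyword argument
```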
@@ -122,7 +122,7 @@ module Langchain::Vectorsearch
     # @param k [Integer] The number of results to have in context
     # @yield [String] Stream responses back one String at a time
     # @return [String] The answer to the question
-    def ask(question:, k: 4, &block)
+    def ask(question:, k: 4, &)
       search_results = similarity_search(query: question, k: k)
 
       context = search_results.map do |result|
@@ -134,7 +134,7 @@ module Langchain::Vectorsearch
       prompt = generate_rag_prompt(question: question, context: context)
 
       messages = [{role: "user", content: prompt}]
-      response = llm.chat(messages: messages, &block)
+      response = llm.chat(messages: messages, &)
 
       response.context = context
       response
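Because `ask` now forwards its anonymous block straight through to `llm.chat`, streaming still works end to end. A usage sketch reusing the `chroma` instance from the sketch above (question text is a placeholder):

```ruby
response = chroma.ask(question: "What do my documents say about pricing?", k: 4) do |chunk|
  print chunk # each streamed String piece of the answer, per the @yield doc
end

response.context # the retrieved snippets used to build the RAG prompt
```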
data/lib/langchain/vectorsearch/elasticsearch.rb CHANGED
@@ -143,7 +143,7 @@ module Langchain::Vectorsearch
     # @param k [Integer] The number of results to have in context
     # @yield [String] Stream responses back one String at a time
     # @return [String] The answer to the question
-    def ask(question:, k: 4, &block)
+    def ask(question:, k: 4, &)
       search_results = similarity_search(query: question, k: k)
 
       context = search_results.map do |result|
@@ -153,7 +153,7 @@ module Langchain::Vectorsearch
       prompt = generate_rag_prompt(question: question, context: context)
 
       messages = [{role: "user", content: prompt}]
-      response = llm.chat(messages: messages, &block)
+      response = llm.chat(messages: messages, &)
 
       response.context = context
       response
data/lib/langchain/vectorsearch/epsilla.rb CHANGED
@@ -129,7 +129,7 @@ module Langchain::Vectorsearch
     # @param k [Integer] The number of results to have in context
     # @yield [String] Stream responses back one String at a time
     # @return [String] The answer to the question
-    def ask(question:, k: 4, &block)
+    def ask(question:, k: 4, &)
       search_results = similarity_search(query: question, k: k)
 
       context = search_results.map do |result|
@@ -140,7 +140,7 @@ module Langchain::Vectorsearch
       prompt = generate_rag_prompt(question: question, context: context)
 
       messages = [{role: "user", content: prompt}]
-      response = llm.chat(messages: messages, &block)
+      response = llm.chat(messages: messages, &)
 
       response.context = context
       response
data/lib/langchain/vectorsearch/hnswlib.rb CHANGED
@@ -58,7 +58,7 @@ module Langchain::Vectorsearch
     #
     # @param query [String] The text to search for
     # @param k [Integer] The number of results to return
-    # @return [Array] Results in the format `[[id1,
+    # @return [Array] Results in the format `[[id1, id2], [distance1, distance2]]`
     #
     def similarity_search(
       query:,
@@ -77,7 +77,7 @@ module Langchain::Vectorsearch
     #
     # @param embedding [Array<Float>] The embedding to search for
     # @param k [Integer] The number of results to return
-    # @return [Array] Results in the format `[[id1,
+    # @return [Array] Results in the format `[[id1, id2], [distance1, distance2]]`
     #
     def similarity_search_by_vector(
       embedding:,
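Both corrected doc comments describe the same shape: two parallel arrays, ids first and distances second. A destructuring sketch, assuming an instance such as `hnswlib = Langchain::Vectorsearch::Hnswlib.new(...)` (setup elided, query text a placeholder):

```ruby
ids, distances = hnswlib.similarity_search(query: "ruby vector search", k: 2)

ids.zip(distances).each do |id, distance|
  puts "id=#{id} distance=#{distance}"
end
```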
data/lib/langchain/vectorsearch/milvus.rb CHANGED
@@ -141,7 +141,7 @@ module Langchain::Vectorsearch
     # @param k [Integer] The number of results to have in context
     # @yield [String] Stream responses back one String at a time
     # @return [String] The answer to the question
-    def ask(question:, k: 4, &block)
+    def ask(question:, k: 4, &)
       search_results = similarity_search(query: question, k: k)
 
       content_field = search_results.dig("results", "fields_data").select { |field| field.dig("field_name") == "content" }
@@ -152,7 +152,7 @@ module Langchain::Vectorsearch
       prompt = generate_rag_prompt(question: question, context: context)
 
       messages = [{role: "user", content: prompt}]
-      response = llm.chat(messages: messages, &block)
+      response = llm.chat(messages: messages, &)
 
       response.context = context
       response
data/lib/langchain/vectorsearch/pgvector.rb CHANGED
@@ -146,7 +146,7 @@ module Langchain::Vectorsearch
     # @param k [Integer] The number of results to have in context
     # @yield [String] Stream responses back one String at a time
     # @return [String] The answer to the question
-    def ask(question:, k: 4, &block)
+    def ask(question:, k: 4, &)
       search_results = similarity_search(query: question, k: k)
 
       context = search_results.map do |result|
@@ -157,7 +157,7 @@ module Langchain::Vectorsearch
       prompt = generate_rag_prompt(question: question, context: context)
 
       messages = [{role: "user", content: prompt}]
-      response = llm.chat(messages: messages, &block)
+      response = llm.chat(messages: messages, &)
 
       response.context = context
       response
data/lib/langchain/vectorsearch/pinecone.rb CHANGED
@@ -171,7 +171,7 @@ module Langchain::Vectorsearch
     # @param filter [String] The filter to use
     # @yield [String] Stream responses back one String at a time
     # @return [String] The answer to the question
-    def ask(question:, namespace: "", filter: nil, k: 4, &block)
+    def ask(question:, namespace: "", filter: nil, k: 4, &)
       search_results = similarity_search(query: question, namespace: namespace, filter: filter, k: k)
 
       context = search_results.map do |result|
@@ -182,7 +182,7 @@ module Langchain::Vectorsearch
       prompt = generate_rag_prompt(question: question, context: context)
 
       messages = [{role: "user", content: prompt}]
-      response = llm.chat(messages: messages, &block)
+      response = llm.chat(messages: messages, &)
 
       response.context = context
       response
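Pinecone's `ask` carries two extra scoping parameters, `namespace` and `filter`, which it passes straight down to `similarity_search`. A usage sketch following the signature above, assuming an instance `pinecone = Langchain::Vectorsearch::Pinecone.new(...)` (setup elided; the namespace value is hypothetical and `filter` keeps its `nil` default):

```ruby
response = pinecone.ask(
  question: "What changed last quarter?",
  namespace: "quarterly-reports", # hypothetical namespace
  k: 4
)
```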
data/lib/langchain/vectorsearch/qdrant.rb CHANGED
@@ -137,7 +137,7 @@ module Langchain::Vectorsearch
     # @param k [Integer] The number of results to have in context
     # @yield [String] Stream responses back one String at a time
     # @return [String] The answer to the question
-    def ask(question:, k: 4, &block)
+    def ask(question:, k: 4, &)
       search_results = similarity_search(query: question, k: k)
 
       context = search_results.map do |result|
@@ -148,7 +148,7 @@ module Langchain::Vectorsearch
       prompt = generate_rag_prompt(question: question, context: context)
 
       messages = [{role: "user", content: prompt}]
-      response = llm.chat(messages: messages, &block)
+      response = llm.chat(messages: messages, &)
 
       response.context = context
       response
data/lib/langchain/vectorsearch/weaviate.rb CHANGED
@@ -143,7 +143,7 @@ module Langchain::Vectorsearch
     # @param k [Integer] The number of results to have in context
     # @yield [String] Stream responses back one String at a time
     # @return [Hash] The answer
-    def ask(question:, k: 4, &block)
+    def ask(question:, k: 4, &)
       search_results = similarity_search(query: question, k: k)
 
       context = search_results.map do |result|
@@ -154,7 +154,7 @@ module Langchain::Vectorsearch
       prompt = generate_rag_prompt(question: question, context: context)
 
       messages = [{role: "user", content: prompt}]
-      response = llm.chat(messages: messages, &block)
+      response = llm.chat(messages: messages, &)
 
       response.context = context
       response
data/lib/langchain/version.rb CHANGED
-  VERSION = "0.13.3"
+  VERSION = "0.13.4"
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: langchainrb
 version: !ruby/object:Gem::Version
-  version: 0.13.3
+  version: 0.13.4
 platform: ruby
 authors:
 - Andrei Bondarev
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-06-03 00:00:00.000000000 Z
+date: 2024-06-16 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: baran
@@ -834,8 +834,8 @@ licenses:
 - MIT
 metadata:
   homepage_uri: https://rubygems.org/gems/langchainrb
-  source_code_uri: https://github.com/
-  changelog_uri: https://github.com/
+  source_code_uri: https://github.com/patterns-ai-core/langchainrb
+  changelog_uri: https://github.com/patterns-ai-core/langchainrb/blob/main/CHANGELOG.md
   documentation_uri: https://rubydoc.info/gems/langchainrb
 post_install_message:
 rdoc_options: []