langchainrb 0.12.0 → 0.13.0
- checksums.yaml +4 -4
- data/CHANGELOG.md +12 -0
- data/README.md +3 -2
- data/lib/langchain/assistants/assistant.rb +75 -20
- data/lib/langchain/assistants/messages/base.rb +16 -0
- data/lib/langchain/assistants/messages/google_gemini_message.rb +90 -0
- data/lib/langchain/assistants/messages/openai_message.rb +74 -0
- data/lib/langchain/assistants/thread.rb +5 -5
- data/lib/langchain/llm/anthropic.rb +27 -49
- data/lib/langchain/llm/aws_bedrock.rb +30 -34
- data/lib/langchain/llm/azure.rb +6 -0
- data/lib/langchain/llm/base.rb +20 -1
- data/lib/langchain/llm/cohere.rb +38 -6
- data/lib/langchain/llm/google_gemini.rb +67 -0
- data/lib/langchain/llm/google_vertex_ai.rb +68 -112
- data/lib/langchain/llm/mistral_ai.rb +10 -19
- data/lib/langchain/llm/ollama.rb +23 -27
- data/lib/langchain/llm/openai.rb +20 -48
- data/lib/langchain/llm/parameters/chat.rb +51 -0
- data/lib/langchain/llm/response/base_response.rb +2 -2
- data/lib/langchain/llm/response/cohere_response.rb +16 -0
- data/lib/langchain/llm/response/google_gemini_response.rb +45 -0
- data/lib/langchain/llm/response/openai_response.rb +5 -1
- data/lib/langchain/llm/unified_parameters.rb +98 -0
- data/lib/langchain/loader.rb +6 -0
- data/lib/langchain/tool/base.rb +16 -6
- data/lib/langchain/tool/calculator/calculator.json +1 -1
- data/lib/langchain/tool/database/database.json +3 -3
- data/lib/langchain/tool/file_system/file_system.json +3 -3
- data/lib/langchain/tool/news_retriever/news_retriever.json +121 -0
- data/lib/langchain/tool/news_retriever/news_retriever.rb +132 -0
- data/lib/langchain/tool/ruby_code_interpreter/ruby_code_interpreter.json +1 -1
- data/lib/langchain/tool/vectorsearch/vectorsearch.json +1 -1
- data/lib/langchain/tool/weather/weather.json +1 -1
- data/lib/langchain/tool/wikipedia/wikipedia.json +1 -1
- data/lib/langchain/tool/wikipedia/wikipedia.rb +2 -2
- data/lib/langchain/utils/token_length/openai_validator.rb +6 -1
- data/lib/langchain/version.rb +1 -1
- data/lib/langchain.rb +3 -0
- metadata +22 -15
- data/lib/langchain/assistants/message.rb +0 -58
- data/lib/langchain/llm/response/google_vertex_ai_response.rb +0 -33
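Going by the file list above, the headline changes in 0.13.0 are the new Google Gemini LLM class, the NewsRetriever tool, unified chat parameters, and the split of the single assistants/message.rb into per-provider message classes. To pick the release up in an application, a standard Bundler entry works (generic Bundler usage, not something this diff prescribes):

    # Gemfile
    gem "langchainrb", "~> 0.13.0"

followed by a bundle update of the langchainrb gem.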
data/lib/langchain/utils/token_length/openai_validator.rb
CHANGED
@@ -28,10 +28,11 @@ module Langchain
       "text-embedding-3-large" => 8191,
       "text-embedding-3-small" => 8191,
       "text-embedding-ada-002" => 8191,
-      "gpt-3.5-turbo" =>
+      "gpt-3.5-turbo" => 16385,
       "gpt-3.5-turbo-0301" => 4096,
       "gpt-3.5-turbo-0613" => 4096,
       "gpt-3.5-turbo-1106" => 16385,
+      "gpt-3.5-turbo-0125" => 16385,
       "gpt-3.5-turbo-16k" => 16384,
       "gpt-3.5-turbo-16k-0613" => 16384,
       "text-davinci-003" => 4097,
@@ -44,9 +45,13 @@ module Langchain
       "gpt-4-32k-0314" => 32768,
       "gpt-4-32k-0613" => 32768,
       "gpt-4-1106-preview" => 128000,
+      "gpt-4-turbo" => 128000,
+      "gpt-4-turbo-2024-04-09" => 128000,
       "gpt-4-turbo-preview" => 128000,
       "gpt-4-0125-preview" => 128000,
       "gpt-4-vision-preview" => 128000,
+      "gpt-4o" => 128000,
+      "gpt-4o-2024-05-13" => 128000,
       "text-curie-001" => 2049,
       "text-babbage-001" => 2049,
       "text-ada-001" => 2049,
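The two hunks above only widen the OpenAI context-window table with the newer GPT-3.5 and GPT-4 model names (gpt-4-turbo, gpt-4o, and friends). As a rough sketch of how such a table gets consulted when sizing prompts, assuming the hash is exposed as a TOKEN_LIMITS constant on the validator (the constant name is an assumption, not shown in this hunk):

    # Hypothetical lookup; TOKEN_LIMITS is assumed to be the constant holding the hash above.
    limit = Langchain::Utils::TokenLength::OpenAIValidator::TOKEN_LIMITS["gpt-4o"]
    # => 128000 if the table matches the added entry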
data/lib/langchain/version.rb
CHANGED
data/lib/langchain.rb
CHANGED
@@ -12,6 +12,7 @@ loader.inflector.inflect(
   "ai21_response" => "AI21Response",
   "ai21_validator" => "AI21Validator",
   "csv" => "CSV",
+  "google_vertex_ai" => "GoogleVertexAI",
   "html" => "HTML",
   "json" => "JSON",
   "jsonl" => "JSONL",
@@ -21,6 +22,7 @@ loader.inflector.inflect(
   "openai" => "OpenAI",
   "openai_validator" => "OpenAIValidator",
   "openai_response" => "OpenAIResponse",
+  "openai_message" => "OpenAIMessage",
   "pdf" => "PDF"
 )
 loader.collapse("#{__dir__}/langchain/llm/response")
@@ -31,6 +33,7 @@ loader.collapse("#{__dir__}/langchain/tool/database")
 loader.collapse("#{__dir__}/langchain/tool/file_system")
 loader.collapse("#{__dir__}/langchain/tool/google_search")
 loader.collapse("#{__dir__}/langchain/tool/ruby_code_interpreter")
+loader.collapse("#{__dir__}/langchain/tool/news_retriever")
 loader.collapse("#{__dir__}/langchain/tool/vectorsearch")
 loader.collapse("#{__dir__}/langchain/tool/weather")
 loader.collapse("#{__dir__}/langchain/tool/wikipedia")
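These lib/langchain.rb edits are Zeitwerk autoloader wiring: the inflect entries teach the loader that google_vertex_ai.rb defines GoogleVertexAI (not GoogleVertexAi) and openai_message.rb defines OpenAIMessage, while the collapse call lets files under tool/news_retriever/ define constants directly in the Tool namespace instead of adding a directory-level namespace. A minimal standalone illustration of the same Zeitwerk calls (the paths here are placeholders, not the gem's real setup):

    require "zeitwerk"

    loader = Zeitwerk::Loader.new
    loader.push_dir("lib")                                          # autoload constants defined under lib/
    loader.inflector.inflect("openai_message" => "OpenAIMessage")   # map file name to the acronym-style constant
    loader.collapse("lib/langchain/tool/news_retriever")            # do not treat this directory as a namespace
    loader.setup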
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: langchainrb
 version: !ruby/object:Gem::Version
-  version: 0.12.0
+  version: 0.13.0
 platform: ruby
 authors:
 - Andrei Bondarev
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-
+date: 2024-05-14 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activesupport
@@ -238,16 +238,16 @@ dependencies:
   name: anthropic
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - "~>"
       - !ruby/object:Gem::Version
-        version: '0'
+        version: '0.2'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - "~>"
       - !ruby/object:Gem::Version
-        version: '0'
+        version: '0.2'
 - !ruby/object:Gem::Dependency
   name: aws-sdk-bedrockruntime
   requirement: !ruby/object:Gem::Requirement
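The anthropic development dependency moves from an unpinned '0' requirement to ~> 0.2. In gemspec terms, the YAML above corresponds to a declaration along these lines (illustrative; the langchainrb.gemspec itself is not part of this diff):

    # langchainrb.gemspec (sketch)
    spec.add_development_dependency "anthropic", "~> 0.2"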
@@ -282,14 +282,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.9.
+        version: 0.9.10
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.9.
+        version: 0.9.10
 - !ruby/object:Gem::Dependency
   name: docx
   requirement: !ruby/object:Gem::Requirement
@@ -347,19 +347,19 @@ dependencies:
       - !ruby/object:Gem::Version
         version: 1.6.5
 - !ruby/object:Gem::Dependency
-  name:
+  name: googleauth
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - ">="
       - !ruby/object:Gem::Version
-        version: '0
+        version: '0'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - ">="
       - !ruby/object:Gem::Version
-        version: '0
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: google_palm_api
   requirement: !ruby/object:Gem::Requirement
@@ -708,7 +708,9 @@ files:
 - README.md
 - lib/langchain.rb
 - lib/langchain/assistants/assistant.rb
-- lib/langchain/assistants/message.rb
+- lib/langchain/assistants/messages/base.rb
+- lib/langchain/assistants/messages/google_gemini_message.rb
+- lib/langchain/assistants/messages/openai_message.rb
 - lib/langchain/assistants/thread.rb
 - lib/langchain/chunk.rb
 - lib/langchain/chunker/base.rb
@@ -735,6 +737,7 @@ files:
 - lib/langchain/llm/azure.rb
 - lib/langchain/llm/base.rb
 - lib/langchain/llm/cohere.rb
+- lib/langchain/llm/google_gemini.rb
 - lib/langchain/llm/google_palm.rb
 - lib/langchain/llm/google_vertex_ai.rb
 - lib/langchain/llm/hugging_face.rb
@@ -742,6 +745,7 @@ files:
 - lib/langchain/llm/mistral_ai.rb
 - lib/langchain/llm/ollama.rb
 - lib/langchain/llm/openai.rb
+- lib/langchain/llm/parameters/chat.rb
 - lib/langchain/llm/prompts/ollama/summarize_template.yaml
 - lib/langchain/llm/prompts/summarize_template.yaml
 - lib/langchain/llm/replicate.rb
@@ -750,14 +754,15 @@ files:
 - lib/langchain/llm/response/aws_titan_response.rb
 - lib/langchain/llm/response/base_response.rb
 - lib/langchain/llm/response/cohere_response.rb
+- lib/langchain/llm/response/google_gemini_response.rb
 - lib/langchain/llm/response/google_palm_response.rb
-- lib/langchain/llm/response/google_vertex_ai_response.rb
 - lib/langchain/llm/response/hugging_face_response.rb
 - lib/langchain/llm/response/llama_cpp_response.rb
 - lib/langchain/llm/response/mistral_ai_response.rb
 - lib/langchain/llm/response/ollama_response.rb
 - lib/langchain/llm/response/openai_response.rb
 - lib/langchain/llm/response/replicate_response.rb
+- lib/langchain/llm/unified_parameters.rb
 - lib/langchain/loader.rb
 - lib/langchain/output_parsers/base.rb
 - lib/langchain/output_parsers/output_fixing_parser.rb
@@ -789,6 +794,8 @@ files:
 - lib/langchain/tool/file_system/file_system.rb
 - lib/langchain/tool/google_search/google_search.json
 - lib/langchain/tool/google_search/google_search.rb
+- lib/langchain/tool/news_retriever/news_retriever.json
+- lib/langchain/tool/news_retriever/news_retriever.rb
 - lib/langchain/tool/ruby_code_interpreter/ruby_code_interpreter.json
 - lib/langchain/tool/ruby_code_interpreter/ruby_code_interpreter.rb
 - lib/langchain/tool/vectorsearch/vectorsearch.json
data/lib/langchain/assistants/message.rb
DELETED
@@ -1,58 +0,0 @@
-# frozen_string_literal: true
-
-module Langchain
-  # Langchain::Message are the messages that are sent to LLM chat methods
-  class Message
-    attr_reader :role, :content, :tool_calls, :tool_call_id
-
-    ROLES = %w[
-      system
-      assistant
-      user
-      tool
-    ].freeze
-
-    # @param role [String] The role of the message
-    # @param content [String] The content of the message
-    # @param tool_calls [Array<Hash>] Tool calls to be made
-    # @param tool_call_id [String] The ID of the tool call to be made
-    def initialize(role:, content: nil, tool_calls: [], tool_call_id: nil) # TODO: Implement image_file: reference (https://platform.openai.com/docs/api-reference/messages/object#messages/object-content)
-      raise ArgumentError, "Role must be one of #{ROLES.join(", ")}" unless ROLES.include?(role)
-      raise ArgumentError, "Tool calls must be an array of hashes" unless tool_calls.is_a?(Array) && tool_calls.all? { |tool_call| tool_call.is_a?(Hash) }
-
-      @role = role
-      # Some Tools return content as a JSON hence `.to_s`
-      @content = content.to_s
-      @tool_calls = tool_calls
-      @tool_call_id = tool_call_id
-    end
-
-    # Convert the message to an OpenAI API-compatible hash
-    #
-    # @return [Hash] The message as an OpenAI API-compatible hash
-    def to_openai_format
-      {}.tap do |h|
-        h[:role] = role
-        h[:content] = content if content # Content is nil for tool calls
-        h[:tool_calls] = tool_calls if tool_calls.any?
-        h[:tool_call_id] = tool_call_id if tool_call_id
-      end
-    end
-
-    def assistant?
-      role == "assistant"
-    end
-
-    def system?
-      role == "system"
-    end
-
-    def user?
-      role == "user"
-    end
-
-    def tool?
-      role == "tool"
-    end
-  end
-end
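This single Langchain::Message class, with its OpenAI-specific to_openai_format, is replaced by the per-provider classes added under lib/langchain/assistants/messages/. A hedged sketch of the replacement shape, inferred from the old constructor and the new file names (the Langchain::Messages::OpenAIMessage name and its behavior are assumptions based on the paths, not verified against the 0.13.0 source):

    # Assumed provider-specific message; arguments mirror the removed class's initializer.
    message = Langchain::Messages::OpenAIMessage.new(
      role: "user",
      content: "What is the weather in Boston?",
      tool_calls: [],
      tool_call_id: nil
    )
    # Presumably each provider class now serializes itself to that provider's chat format,
    # instead of every message carrying an OpenAI-only to_openai_format.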
data/lib/langchain/llm/response/google_vertex_ai_response.rb
DELETED
@@ -1,33 +0,0 @@
-# frozen_string_literal: true
-
-module Langchain::LLM
-  class GoogleVertexAiResponse < BaseResponse
-    attr_reader :prompt_tokens
-
-    def initialize(raw_response, model: nil)
-      @prompt_tokens = prompt_tokens
-      super(raw_response, model: model)
-    end
-
-    def completion
-      # completions&.dig(0, "output")
-      raw_response.predictions[0]["content"]
-    end
-
-    def embedding
-      embeddings.first
-    end
-
-    def completions
-      raw_response.predictions.map { |p| p["content"] }
-    end
-
-    def total_tokens
-      raw_response.dig(:predictions, 0, :embeddings, :statistics, :token_count)
-    end
-
-    def embeddings
-      [raw_response.dig(:predictions, 0, :embeddings, :values)]
-    end
-  end
-end
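GoogleVertexAiResponse goes away together with the rewritten Vertex AI adapter; per the file list, google_gemini_response.rb is the new response wrapper on the Google side. What callers rely on is the BaseResponse-style reader interface visible above (completion, completions, embedding, embeddings, total_tokens); a usage sketch under the assumption that the replacement keeps those accessors:

    # Hedged usage sketch of the response-wrapper pattern shown above.
    response = llm.embed(text: "hello world")   # an LLM adapter returns a *Response object
    vector   = response.embedding               # first embedding vector
    usage    = response.total_tokens            # provider-reported token count, when available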