langchainrb 0.17.1 → 0.19.0
- checksums.yaml +4 -4
- data/CHANGELOG.md +30 -0
- data/README.md +7 -2
- data/lib/langchain/{assistants → assistant}/llm/adapter.rb +1 -1
- data/lib/langchain/assistant/llm/adapters/anthropic.rb +105 -0
- data/lib/langchain/assistant/llm/adapters/base.rb +63 -0
- data/lib/langchain/{assistants → assistant}/llm/adapters/google_gemini.rb +43 -3
- data/lib/langchain/{assistants → assistant}/llm/adapters/mistral_ai.rb +39 -2
- data/lib/langchain/assistant/llm/adapters/ollama.rb +94 -0
- data/lib/langchain/{assistants → assistant}/llm/adapters/openai.rb +38 -2
- data/lib/langchain/assistant/messages/anthropic_message.rb +83 -0
- data/lib/langchain/assistant/messages/base.rb +56 -0
- data/lib/langchain/assistant/messages/google_gemini_message.rb +92 -0
- data/lib/langchain/assistant/messages/mistral_ai_message.rb +143 -0
- data/lib/langchain/assistant/messages/ollama_message.rb +76 -0
- data/lib/langchain/assistant/messages/openai_message.rb +105 -0
- data/lib/langchain/{assistants/assistant.rb → assistant.rb} +26 -48
- data/lib/langchain/llm/ai21.rb +1 -1
- data/lib/langchain/llm/anthropic.rb +64 -9
- data/lib/langchain/llm/aws_bedrock.rb +12 -13
- data/lib/langchain/llm/azure.rb +2 -2
- data/lib/langchain/llm/base.rb +1 -1
- data/lib/langchain/llm/cohere.rb +8 -8
- data/lib/langchain/llm/google_gemini.rb +5 -6
- data/lib/langchain/llm/google_vertex_ai.rb +6 -5
- data/lib/langchain/llm/hugging_face.rb +5 -5
- data/lib/langchain/llm/mistral_ai.rb +4 -4
- data/lib/langchain/llm/ollama.rb +7 -8
- data/lib/langchain/llm/openai.rb +8 -7
- data/lib/langchain/llm/parameters/chat.rb +1 -0
- data/lib/langchain/llm/replicate.rb +8 -16
- data/lib/langchain/tool_definition.rb +7 -0
- data/lib/langchain/version.rb +1 -1
- data/lib/langchain.rb +1 -14
- metadata +16 -16
- data/lib/langchain/assistants/llm/adapters/_base.rb +0 -21
- data/lib/langchain/assistants/llm/adapters/anthropic.rb +0 -62
- data/lib/langchain/assistants/llm/adapters/ollama.rb +0 -57
- data/lib/langchain/assistants/messages/anthropic_message.rb +0 -75
- data/lib/langchain/assistants/messages/base.rb +0 -54
- data/lib/langchain/assistants/messages/google_gemini_message.rb +0 -90
- data/lib/langchain/assistants/messages/mistral_ai_message.rb +0 -96
- data/lib/langchain/assistants/messages/ollama_message.rb +0 -74
- data/lib/langchain/assistants/messages/openai_message.rb +0 -103
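The headline change in this range is the rename of the plural lib/langchain/assistants/ tree to the singular lib/langchain/assistant/ (with adapters/_base.rb becoming adapters/base.rb). A minimal sketch of what the move suggests for constant paths, assuming the Ruby namespaces mirror the file layout; the old Langchain::Messages constants appear in the deleted files below, while the new Langchain::Assistant::Messages nesting is inferred from the new paths:

require "langchain"

# 0.17.1 (see the deleted files below): message classes lived under Langchain::Messages
# message = Langchain::Messages::OpenAIMessage.new(role: "user", content: "Hi!")

# 0.19.0: files now sit under lib/langchain/assistant/messages/, suggesting
# constants nested inside the Assistant class:
message = Langchain::Assistant::Messages::OpenAIMessage.new(role: "user", content: "Hi!")
message.user? # => true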
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: langchainrb
 version: !ruby/object:Gem::Version
-  version: 0.17.1
+  version: 0.19.0
 platform: ruby
 authors:
 - Andrei Bondarev
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-10-
+date: 2024-10-23 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: baran
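To pick up the new release, the Gemfile pin changes accordingly (a sketch; choose the constraint your app's policy calls for):

# Gemfile
gem "langchainrb", "~> 0.19.0"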
@@ -637,20 +637,20 @@ files:
 - LICENSE.txt
 - README.md
 - lib/langchain.rb
-- lib/langchain/assistants/assistant.rb
-- lib/langchain/assistants/llm/adapter.rb
-- lib/langchain/assistants/llm/adapters/_base.rb
-- lib/langchain/assistants/llm/adapters/anthropic.rb
-- lib/langchain/assistants/llm/adapters/google_gemini.rb
-- lib/langchain/assistants/llm/adapters/mistral_ai.rb
-- lib/langchain/assistants/llm/adapters/ollama.rb
-- lib/langchain/assistants/llm/adapters/openai.rb
-- lib/langchain/assistants/messages/anthropic_message.rb
-- lib/langchain/assistants/messages/base.rb
-- lib/langchain/assistants/messages/google_gemini_message.rb
-- lib/langchain/assistants/messages/mistral_ai_message.rb
-- lib/langchain/assistants/messages/ollama_message.rb
-- lib/langchain/assistants/messages/openai_message.rb
+- lib/langchain/assistant.rb
+- lib/langchain/assistant/llm/adapter.rb
+- lib/langchain/assistant/llm/adapters/anthropic.rb
+- lib/langchain/assistant/llm/adapters/base.rb
+- lib/langchain/assistant/llm/adapters/google_gemini.rb
+- lib/langchain/assistant/llm/adapters/mistral_ai.rb
+- lib/langchain/assistant/llm/adapters/ollama.rb
+- lib/langchain/assistant/llm/adapters/openai.rb
+- lib/langchain/assistant/messages/anthropic_message.rb
+- lib/langchain/assistant/messages/base.rb
+- lib/langchain/assistant/messages/google_gemini_message.rb
+- lib/langchain/assistant/messages/mistral_ai_message.rb
+- lib/langchain/assistant/messages/ollama_message.rb
+- lib/langchain/assistant/messages/openai_message.rb
 - lib/langchain/chunk.rb
 - lib/langchain/chunker/base.rb
 - lib/langchain/chunker/markdown.rb
data/lib/langchain/assistants/llm/adapters/_base.rb DELETED
@@ -1,21 +0,0 @@
-module Langchain
-  class Assistant
-    module LLM
-      module Adapters
-        class Base
-          def build_chat_params(tools:, instructions:, messages:, tool_choice:)
-            raise NotImplementedError, "Subclasses must implement build_chat_params"
-          end
-
-          def extract_tool_call_args(tool_call:)
-            raise NotImplementedError, "Subclasses must implement extract_tool_call_args"
-          end
-
-          def build_message(role:, content: nil, image_url: nil, tool_calls: [], tool_call_id: nil)
-            raise NotImplementedError, "Subclasses must implement build_message"
-          end
-        end
-      end
-    end
-  end
-end
data/lib/langchain/assistants/llm/adapters/anthropic.rb DELETED
@@ -1,62 +0,0 @@
-module Langchain
-  class Assistant
-    module LLM
-      module Adapters
-        class Anthropic < Base
-          def build_chat_params(tools:, instructions:, messages:, tool_choice:)
-            params = {messages: messages}
-            if tools.any?
-              params[:tools] = build_tools(tools)
-              params[:tool_choice] = build_tool_choice(tool_choice)
-            end
-            params[:system] = instructions if instructions
-            params
-          end
-
-          def build_message(role:, content: nil, image_url: nil, tool_calls: [], tool_call_id: nil)
-            warn "Image URL is not supported by Anthropic currently" if image_url
-
-            Langchain::Messages::AnthropicMessage.new(role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id)
-          end
-
-          # Extract the tool call information from the Anthropic tool call hash
-          #
-          # @param tool_call [Hash] The tool call hash, format: {"type"=>"tool_use", "id"=>"toolu_01TjusbFApEbwKPRWTRwzadR", "name"=>"news_retriever__get_top_headlines", "input"=>{"country"=>"us", "page_size"=>10}}], "stop_reason"=>"tool_use"}
-          # @return [Array] The tool call information
-          def extract_tool_call_args(tool_call:)
-            tool_call_id = tool_call.dig("id")
-            function_name = tool_call.dig("name")
-            tool_name, method_name = function_name.split("__")
-            tool_arguments = tool_call.dig("input").transform_keys(&:to_sym)
-            [tool_call_id, tool_name, method_name, tool_arguments]
-          end
-
-          def build_tools(tools)
-            tools.map { |tool| tool.class.function_schemas.to_anthropic_format }.flatten
-          end
-
-          def allowed_tool_choices
-            ["auto", "any"]
-          end
-
-          def available_tool_names(tools)
-            build_tools(tools).map { |tool| tool.dig(:name) }
-          end
-
-          private
-
-          def build_tool_choice(choice)
-            case choice
-            when "auto"
-              {type: "auto"}
-            when "any"
-              {type: "any"}
-            else
-              {type: "tool", name: choice}
-            end
-          end
-        end
-      end
-    end
-  end
-end
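For reference, the deleted Anthropic adapter's extract_tool_call_args splits the double-underscored function name back into tool and method names. A sketch of the round trip against 0.17.x, using the sample tool_use hash from the doc comment above:

tool_call = {
  "type" => "tool_use",
  "id" => "toolu_01TjusbFApEbwKPRWTRwzadR",
  "name" => "news_retriever__get_top_headlines",
  "input" => {"country" => "us", "page_size" => 10}
}

adapter = Langchain::Assistant::LLM::Adapters::Anthropic.new
adapter.extract_tool_call_args(tool_call: tool_call)
# => ["toolu_01TjusbFApEbwKPRWTRwzadR", "news_retriever", "get_top_headlines",
#     {country: "us", page_size: 10}]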
data/lib/langchain/assistants/llm/adapters/ollama.rb DELETED
@@ -1,57 +0,0 @@
-module Langchain
-  class Assistant
-    module LLM
-      module Adapters
-        class Ollama < Base
-          def build_chat_params(tools:, instructions:, messages:, tool_choice:)
-            params = {messages: messages}
-            if tools.any?
-              params[:tools] = build_tools(tools)
-            end
-            params
-          end
-
-          def build_message(role:, content: nil, image_url: nil, tool_calls: [], tool_call_id: nil)
-            warn "Image URL is not supported by Ollama currently" if image_url
-
-            Langchain::Messages::OllamaMessage.new(role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id)
-          end
-
-          # Extract the tool call information from the OpenAI tool call hash
-          #
-          # @param tool_call [Hash] The tool call hash
-          # @return [Array] The tool call information
-          def extract_tool_call_args(tool_call:)
-            tool_call_id = tool_call.dig("id")
-
-            function_name = tool_call.dig("function", "name")
-            tool_name, method_name = function_name.split("__")
-
-            tool_arguments = tool_call.dig("function", "arguments")
-            tool_arguments = if tool_arguments.is_a?(Hash)
-              Langchain::Utils::HashTransformer.symbolize_keys(tool_arguments)
-            else
-              JSON.parse(tool_arguments, symbolize_names: true)
-            end
-
-            [tool_call_id, tool_name, method_name, tool_arguments]
-          end
-
-          def available_tool_names(tools)
-            build_tools(tools).map { |tool| tool.dig(:function, :name) }
-          end
-
-          def allowed_tool_choices
-            ["auto", "none"]
-          end
-
-          private
-
-          def build_tools(tools)
-            tools.map { |tool| tool.class.function_schemas.to_openai_format }.flatten
-          end
-        end
-      end
-    end
-  end
-end
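The deleted Ollama adapter accepts tool arguments either as an already-parsed Hash or as a JSON string, and both branches end in symbol keys. The string branch is plain stdlib JSON; the Hash branch delegates to Langchain::Utils::HashTransformer.symbolize_keys, which for a flat hash behaves like transform_keys(&:to_sym). Values below are hypothetical:

require "json"

# String branch, as in the deleted extract_tool_call_args:
JSON.parse('{"country":"us","page_size":10}', symbolize_names: true)
# => {country: "us", page_size: 10}

# Hash branch, shown here with the stdlib equivalent for a flat hash:
{"country" => "us", "page_size" => 10}.transform_keys(&:to_sym)
# => {country: "us", page_size: 10}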
data/lib/langchain/assistants/messages/anthropic_message.rb DELETED
@@ -1,75 +0,0 @@
-# frozen_string_literal: true
-
-module Langchain
-  module Messages
-    class AnthropicMessage < Base
-      ROLES = [
-        "assistant",
-        "user",
-        "tool_result"
-      ].freeze
-
-      TOOL_ROLE = "tool_result"
-
-      def initialize(role:, content: nil, tool_calls: [], tool_call_id: nil)
-        raise ArgumentError, "Role must be one of #{ROLES.join(", ")}" unless ROLES.include?(role)
-        raise ArgumentError, "Tool calls must be an array of hashes" unless tool_calls.is_a?(Array) && tool_calls.all? { |tool_call| tool_call.is_a?(Hash) }
-
-        @role = role
-        # Some Tools return content as a JSON hence `.to_s`
-        @content = content.to_s
-        @tool_calls = tool_calls
-        @tool_call_id = tool_call_id
-      end
-
-      # Convert the message to an Anthropic API-compatible hash
-      #
-      # @return [Hash] The message as an Anthropic API-compatible hash
-      def to_hash
-        {}.tap do |h|
-          h[:role] = tool? ? "user" : role
-
-          h[:content] = if tool?
-            [
-              {
-                type: "tool_result",
-                tool_use_id: tool_call_id,
-                content: content
-              }
-            ]
-          elsif tool_calls.any?
-            tool_calls
-          else
-            content
-          end
-        end
-      end
-
-      # Check if the message is a tool call
-      #
-      # @return [Boolean] true/false whether this message is a tool call
-      def tool?
-        role == "tool_result"
-      end
-
-      # Anthropic does not implement system prompts
-      def system?
-        false
-      end
-
-      # Check if the message came from an LLM
-      #
-      # @return [Boolean] true/false whether this message was produced by an LLM
-      def assistant?
-        role == "assistant"
-      end
-
-      # Check if the message came from an LLM
-      #
-      # @return [Boolean] true/false whether this message was produced by an LLM
-      def llm?
-        assistant?
-      end
-    end
-  end
-end
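Per the deleted to_hash, a tool_result message is sent back to Anthropic as a user-role message wrapping the tool output. Sample values are hypothetical; this runs against 0.17.x:

msg = Langchain::Messages::AnthropicMessage.new(
  role: "tool_result",
  content: "42",
  tool_call_id: "toolu_01TjusbFApEbwKPRWTRwzadR"
)
msg.to_hash
# => {role: "user",
#     content: [{type: "tool_result", tool_use_id: "toolu_01TjusbFApEbwKPRWTRwzadR", content: "42"}]}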
data/lib/langchain/assistants/messages/base.rb DELETED
@@ -1,54 +0,0 @@
-# frozen_string_literal: true
-
-module Langchain
-  module Messages
-    class Base
-      attr_reader :role,
-        :content,
-        :image_url,
-        :tool_calls,
-        :tool_call_id
-
-      # Check if the message came from a user
-      #
-      # @return [Boolean] true/false whether the message came from a user
-      def user?
-        role == "user"
-      end
-
-      # Check if the message came from an LLM
-      #
-      # @raise NotImplementedError if the subclass does not implement this method
-      def llm?
-        raise NotImplementedError, "Class #{self.class.name} must implement the method 'llm?'"
-      end
-
-      # Check if the message is a tool call
-      #
-      # @raise NotImplementedError if the subclass does not implement this method
-      def tool?
-        raise NotImplementedError, "Class #{self.class.name} must implement the method 'tool?'"
-      end
-
-      # Check if the message is a system prompt
-      #
-      # @raise NotImplementedError if the subclass does not implement this method
-      def system?
-        raise NotImplementedError, "Class #{self.class.name} must implement the method 'system?'"
-      end
-
-      # Returns the standardized role symbol based on the specific role methods
-      #
-      # @return [Symbol] the standardized role symbol (:system, :llm, :tool, :user, or :unknown)
-      def standard_role
-        return :user if user?
-        return :llm if llm?
-        return :tool if tool?
-        return :system if system?
-
-        # TODO: Should we return :unknown or raise an error?
-        :unknown
-      end
-    end
-  end
-end
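standard_role gives callers a provider-agnostic view over the per-provider predicates; for instance, with the OpenAI subclass deleted at the end of this diff (0.17.x behavior):

Langchain::Messages::OpenAIMessage.new(role: "assistant", content: "Hello").standard_role
# => :llm   (assistant? is true, so llm? is true)

Langchain::Messages::OpenAIMessage.new(role: "tool", content: "42", tool_call_id: "call_1").standard_role
# => :tool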
data/lib/langchain/assistants/messages/google_gemini_message.rb DELETED
@@ -1,90 +0,0 @@
-# frozen_string_literal: true
-
-module Langchain
-  module Messages
-    class GoogleGeminiMessage < Base
-      # Google Gemini uses the following roles:
-      ROLES = [
-        "user",
-        "model",
-        "function"
-      ].freeze
-
-      TOOL_ROLE = "function"
-
-      # Initialize a new Google Gemini message
-      #
-      # @param [String] The role of the message
-      # @param [String] The content of the message
-      # @param [Array<Hash>] The tool calls made in the message
-      # @param [String] The ID of the tool call
-      def initialize(role:, content: nil, tool_calls: [], tool_call_id: nil)
-        raise ArgumentError, "Role must be one of #{ROLES.join(", ")}" unless ROLES.include?(role)
-        raise ArgumentError, "Tool calls must be an array of hashes" unless tool_calls.is_a?(Array) && tool_calls.all? { |tool_call| tool_call.is_a?(Hash) }
-
-        @role = role
-        # Some Tools return content as a JSON hence `.to_s`
-        @content = content.to_s
-        @tool_calls = tool_calls
-        @tool_call_id = tool_call_id
-      end
-
-      # Check if the message came from an LLM
-      #
-      # @return [Boolean] true/false whether this message was produced by an LLM
-      def llm?
-        model?
-      end
-
-      # Convert the message to a Google Gemini API-compatible hash
-      #
-      # @return [Hash] The message as a Google Gemini API-compatible hash
-      def to_hash
-        {}.tap do |h|
-          h[:role] = role
-          h[:parts] = if function?
-            [{
-              functionResponse: {
-                name: tool_call_id,
-                response: {
-                  name: tool_call_id,
-                  content: content
-                }
-              }
-            }]
-          elsif tool_calls.any?
-            tool_calls
-          else
-            [{text: content}]
-          end
-        end
-      end
-
-      # Google Gemini does not implement system prompts
-      def system?
-        false
-      end
-
-      # Check if the message is a tool call
-      #
-      # @return [Boolean] true/false whether this message is a tool call
-      def tool?
-        function?
-      end
-
-      # Check if the message is a tool call
-      #
-      # @return [Boolean] true/false whether this message is a tool call
-      def function?
-        role == "function"
-      end
-
-      # Check if the message came from an LLM
-      #
-      # @return [Boolean] true/false whether this message was produced by an LLM
-      def model?
-        role == "model"
-      end
-    end
-  end
-end
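Note how the deleted Gemini to_hash repeats the tool name inside the functionResponse payload, keyed off tool_call_id both times. Sample values are hypothetical; 0.17.x behavior:

msg = Langchain::Messages::GoogleGeminiMessage.new(
  role: "function",
  content: "72F and sunny",
  tool_call_id: "weather__get_current"
)
msg.to_hash
# => {role: "function",
#     parts: [{functionResponse: {name: "weather__get_current",
#              response: {name: "weather__get_current", content: "72F and sunny"}}}]}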
data/lib/langchain/assistants/messages/mistral_ai_message.rb DELETED
@@ -1,96 +0,0 @@
-# frozen_string_literal: true
-
-module Langchain
-  module Messages
-    class MistralAIMessage < Base
-      # MistralAI uses the following roles:
-      ROLES = [
-        "system",
-        "assistant",
-        "user",
-        "tool"
-      ].freeze
-
-      TOOL_ROLE = "tool"
-
-      # Initialize a new MistralAI message
-      #
-      # @param role [String] The role of the message
-      # @param content [String] The content of the message
-      # @param image_url [String] The URL of the image
-      # @param tool_calls [Array<Hash>] The tool calls made in the message
-      # @param tool_call_id [String] The ID of the tool call
-      def initialize(role:, content: nil, image_url: nil, tool_calls: [], tool_call_id: nil) # TODO: Implement image_file: reference (https://platform.openai.com/docs/api-reference/messages/object#messages/object-content)
-        raise ArgumentError, "Role must be one of #{ROLES.join(", ")}" unless ROLES.include?(role)
-        raise ArgumentError, "Tool calls must be an array of hashes" unless tool_calls.is_a?(Array) && tool_calls.all? { |tool_call| tool_call.is_a?(Hash) }
-
-        @role = role
-        # Some Tools return content as a JSON hence `.to_s`
-        @content = content.to_s
-        # Make sure you're using the Pixtral model if you want to send image_url
-        @image_url = image_url
-        @tool_calls = tool_calls
-        @tool_call_id = tool_call_id
-      end
-
-      # Check if the message came from an LLM
-      #
-      # @return [Boolean] true/false whether this message was produced by an LLM
-      def llm?
-        assistant?
-      end
-
-      # Convert the message to an MistralAI API-compatible hash
-      #
-      # @return [Hash] The message as an MistralAI API-compatible hash
-      def to_hash
-        {}.tap do |h|
-          h[:role] = role
-
-          if tool_calls.any?
-            h[:tool_calls] = tool_calls
-          else
-            h[:tool_call_id] = tool_call_id if tool_call_id
-
-            h[:content] = []
-
-            if content && !content.empty?
-              h[:content] << {
-                type: "text",
-                text: content
-              }
-            end
-
-            if image_url
-              h[:content] << {
-                type: "image_url",
-                image_url: image_url
-              }
-            end
-          end
-        end
-      end
-
-      # Check if the message came from an LLM
-      #
-      # @return [Boolean] true/false whether this message was produced by an LLM
-      def assistant?
-        role == "assistant"
-      end
-
-      # Check if the message are system instructions
-      #
-      # @return [Boolean] true/false whether this message are system instructions
-      def system?
-        role == "system"
-      end
-
-      # Check if the message is a tool call
-      #
-      # @return [Boolean] true/false whether this message is a tool call
-      def tool?
-        role == "tool"
-      end
-    end
-  end
-end
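The deleted Mistral message passes image_url through as a bare string inside the content part, unlike the OpenAI variant at the end of this diff, which nests it under a url: key. Sample values are hypothetical; 0.17.x behavior:

msg = Langchain::Messages::MistralAIMessage.new(
  role: "user",
  content: "Describe this image",
  image_url: "https://example.com/cat.png" # Pixtral-family models only, per the inline comment
)
msg.to_hash
# => {role: "user",
#     content: [{type: "text", text: "Describe this image"},
#               {type: "image_url", image_url: "https://example.com/cat.png"}]}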
data/lib/langchain/assistants/messages/ollama_message.rb DELETED
@@ -1,74 +0,0 @@
-# frozen_string_literal: true
-
-module Langchain
-  module Messages
-    class OllamaMessage < Base
-      # OpenAI uses the following roles:
-      ROLES = [
-        "system",
-        "assistant",
-        "user",
-        "tool"
-      ].freeze
-
-      TOOL_ROLE = "tool"
-
-      # Initialize a new OpenAI message
-      #
-      # @param [String] The role of the message
-      # @param [String] The content of the message
-      # @param [Array<Hash>] The tool calls made in the message
-      # @param [String] The ID of the tool call
-      def initialize(role:, content: nil, tool_calls: [], tool_call_id: nil)
-        raise ArgumentError, "Role must be one of #{ROLES.join(", ")}" unless ROLES.include?(role)
-        raise ArgumentError, "Tool calls must be an array of hashes" unless tool_calls.is_a?(Array) && tool_calls.all? { |tool_call| tool_call.is_a?(Hash) }
-
-        @role = role
-        # Some Tools return content as a JSON hence `.to_s`
-        @content = content.to_s
-        @tool_calls = tool_calls
-        @tool_call_id = tool_call_id
-      end
-
-      # Convert the message to an OpenAI API-compatible hash
-      #
-      # @return [Hash] The message as an OpenAI API-compatible hash
-      def to_hash
-        {}.tap do |h|
-          h[:role] = role
-          h[:content] = content if content # Content is nil for tool calls
-          h[:tool_calls] = tool_calls if tool_calls.any?
-          h[:tool_call_id] = tool_call_id if tool_call_id
-        end
-      end
-
-      # Check if the message came from an LLM
-      #
-      # @return [Boolean] true/false whether this message was produced by an LLM
-      def llm?
-        assistant?
-      end
-
-      # Check if the message came from an LLM
-      #
-      # @return [Boolean] true/false whether this message was produced by an LLM
-      def assistant?
-        role == "assistant"
-      end
-
-      # Check if the message are system instructions
-      #
-      # @return [Boolean] true/false whether this message are system instructions
-      def system?
-        role == "system"
-      end
-
-      # Check if the message is a tool call
-      #
-      # @return [Boolean] true/false whether this message is a tool call
-      def tool?
-        role == "tool"
-      end
-    end
-  end
-end
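Unlike the Mistral and OpenAI variants, the deleted Ollama message keeps to_hash flat, with no content-parts array. Sample values are hypothetical; 0.17.x behavior:

Langchain::Messages::OllamaMessage.new(role: "tool", content: "42", tool_call_id: "call_1").to_hash
# => {role: "tool", content: "42", tool_call_id: "call_1"}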
data/lib/langchain/assistants/messages/openai_message.rb DELETED
@@ -1,103 +0,0 @@
-# frozen_string_literal: true
-
-module Langchain
-  module Messages
-    class OpenAIMessage < Base
-      # OpenAI uses the following roles:
-      ROLES = [
-        "system",
-        "assistant",
-        "user",
-        "tool"
-      ].freeze
-
-      TOOL_ROLE = "tool"
-
-      # Initialize a new OpenAI message
-      #
-      # @param role [String] The role of the message
-      # @param content [String] The content of the message
-      # @param image_url [String] The URL of the image
-      # @param tool_calls [Array<Hash>] The tool calls made in the message
-      # @param tool_call_id [String] The ID of the tool call
-      def initialize(
-        role:,
-        content: nil,
-        image_url: nil,
-        tool_calls: [],
-        tool_call_id: nil
-      )
-        raise ArgumentError, "Role must be one of #{ROLES.join(", ")}" unless ROLES.include?(role)
-        raise ArgumentError, "Tool calls must be an array of hashes" unless tool_calls.is_a?(Array) && tool_calls.all? { |tool_call| tool_call.is_a?(Hash) }
-
-        @role = role
-        # Some Tools return content as a JSON hence `.to_s`
-        @content = content.to_s
-        @image_url = image_url
-        @tool_calls = tool_calls
-        @tool_call_id = tool_call_id
-      end
-
-      # Check if the message came from an LLM
-      #
-      # @return [Boolean] true/false whether this message was produced by an LLM
-      def llm?
-        assistant?
-      end
-
-      # Convert the message to an OpenAI API-compatible hash
-      #
-      # @return [Hash] The message as an OpenAI API-compatible hash
-      def to_hash
-        {}.tap do |h|
-          h[:role] = role
-
-          if tool_calls.any?
-            h[:tool_calls] = tool_calls
-          else
-            h[:tool_call_id] = tool_call_id if tool_call_id
-
-            h[:content] = []
-
-            if content && !content.empty?
-              h[:content] << {
-                type: "text",
-                text: content
-              }
-            end
-
-            if image_url
-              h[:content] << {
-                type: "image_url",
-                image_url: {
-                  url: image_url
-                }
-              }
-            end
-          end
-        end
-      end
-
-      # Check if the message came from an LLM
-      #
-      # @return [Boolean] true/false whether this message was produced by an LLM
-      def assistant?
-        role == "assistant"
-      end
-
-      # Check if the message are system instructions
-      #
-      # @return [Boolean] true/false whether this message are system instructions
-      def system?
-        role == "system"
-      end
-
-      # Check if the message is a tool call
-      #
-      # @return [Boolean] true/false whether this message is a tool call
-      def tool?
-        role == "tool"
-      end
-    end
-  end
-end
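And the OpenAI counterpart of the same call, where the deleted to_hash wraps the image URL one level deeper. Sample values are hypothetical; 0.17.x behavior:

msg = Langchain::Messages::OpenAIMessage.new(
  role: "user",
  content: "Describe this image",
  image_url: "https://example.com/cat.png"
)
msg.to_hash
# => {role: "user",
#     content: [{type: "text", text: "Describe this image"},
#               {type: "image_url", image_url: {url: "https://example.com/cat.png"}}]}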