langchainrb 0.18.0 → 0.19.1
- checksums.yaml +4 -4
- data/CHANGELOG.md +30 -0
- data/README.md +4 -4
- data/lib/langchain/assistant/llm/adapter.rb +7 -6
- data/lib/langchain/assistant/llm/adapters/anthropic.rb +1 -3
- data/lib/langchain/assistant/llm/adapters/aws_bedrock_anthropic.rb +35 -0
- data/lib/langchain/assistant/llm/adapters/ollama.rb +1 -3
- data/lib/langchain/assistant/messages/anthropic_message.rb +89 -17
- data/lib/langchain/assistant/messages/base.rb +4 -0
- data/lib/langchain/assistant/messages/google_gemini_message.rb +62 -21
- data/lib/langchain/assistant/messages/mistral_ai_message.rb +69 -24
- data/lib/langchain/assistant/messages/ollama_message.rb +9 -5
- data/lib/langchain/assistant/messages/openai_message.rb +78 -26
- data/lib/langchain/assistant.rb +2 -1
- data/lib/langchain/llm/anthropic.rb +10 -10
- data/lib/langchain/llm/aws_bedrock.rb +75 -120
- data/lib/langchain/llm/azure.rb +1 -1
- data/lib/langchain/llm/base.rb +1 -1
- data/lib/langchain/llm/cohere.rb +8 -8
- data/lib/langchain/llm/google_gemini.rb +5 -6
- data/lib/langchain/llm/google_vertex_ai.rb +6 -5
- data/lib/langchain/llm/hugging_face.rb +4 -4
- data/lib/langchain/llm/mistral_ai.rb +4 -4
- data/lib/langchain/llm/ollama.rb +10 -8
- data/lib/langchain/llm/openai.rb +6 -5
- data/lib/langchain/llm/parameters/chat.rb +4 -1
- data/lib/langchain/llm/replicate.rb +6 -6
- data/lib/langchain/llm/response/ai21_response.rb +20 -0
- data/lib/langchain/tool_definition.rb +7 -0
- data/lib/langchain/utils/image_wrapper.rb +37 -0
- data/lib/langchain/version.rb +1 -1
- metadata +4 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 6c33b1ac19c1cf4a3a7de8fbf5d2568899d05662cb7db0b96321186c255ef312
+  data.tar.gz: 96bad2ff6f9b9a9cbac26699b525a8646482a0a77131d58fed84de3bafcb074e
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: aef30f32cfbdba307372ab8b20a0a484d22bd72f68d614056c6abb016b16dbd5e7a51dc15fb26746fd7a88a429d6282848aace42076c41e4fb31d58be9fb27f6
+  data.tar.gz: 3fd751a81f1121209ae2fac6bd25bb2e9d524b23dd5afe83967e10a78521e6994b7ca855e7e4343dded1117c5a7377d26cf15607f16247e9f220a27d17b91827
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,35 @@
+# CHANGELOG
+
+## Key
+- [BREAKING]: A breaking change. After an upgrade, your app may need modifications to keep working correctly.
+- [FEATURE]: A non-breaking improvement to the app. Either introduces new functionality, or improves on an existing feature.
+- [BUGFIX]: Fixes a bug with a non-breaking change.
+- [COMPAT]: Compatibility improvements - changes to make Langchain.rb more compatible with different dependency versions.
+- [OPTIM]: Optimization or performance increase.
+- [DOCS]: Documentation changes. No changes to the library's behavior.
+- [SECURITY]: A change which fixes a security vulnerability.
+
 ## [Unreleased]
 
+## [0.19.1] - 2024-11-21
+- [FEATURE] [https://github.com/patterns-ai-core/langchainrb/pull/858] Assistant, when using Anthropic, now also accepts image_url in the message.
+- [FEATURE] [https://github.com/patterns-ai-core/langchainrb/pull/861] Clean up passing `max_tokens` to the Anthropic constructor and chat method
+- [FEATURE] [https://github.com/patterns-ai-core/langchainrb/pull/849] Langchain::Assistant now works with AWS Bedrock-hosted Anthropic models
+- [OPTIM] [https://github.com/patterns-ai-core/langchainrb/pull/867] Refactor `GoogleGeminiMessage#to_hash` and `OpenAIMessage#to_hash` methods.
+- [OPTIM] [https://github.com/patterns-ai-core/langchainrb/pull/849] Simplify the Langchain::LLM::AwsBedrock class
+- [BUGFIX] [https://github.com/patterns-ai-core/langchainrb/pull/869] AnthropicMessage now correctly handles tool calls with content.
+- [OPTIM] [https://github.com/patterns-ai-core/langchainrb/pull/870] Assistant, when using Ollama (e.g. the llava model), now also accepts image_url in the message.
+
+## [0.19.0] - 2024-10-23
+- [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/840] Rename the `chat_completion_model_name` parameter to `chat_model` in Langchain::LLM parameters.
+- [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/840] Rename the `completion_model_name` parameter to `completion_model` in Langchain::LLM parameters.
+- [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/840] Rename the `embeddings_model_name` parameter to `embedding_model` in Langchain::LLM parameters.
+- [BUGFIX] [https://github.com/patterns-ai-core/langchainrb/pull/850] Fix MistralAIMessage to handle "Tool" output
+- [BUGFIX] [https://github.com/patterns-ai-core/langchainrb/pull/837] Fix a bug when tool functions with no input variables are used with Langchain::LLM::Anthropic
+- [BUGFIX] [https://github.com/patterns-ai-core/langchainrb/pull/836] Fix a bug where assistant.instructions = nil did not remove the system message
+- [FEATURE] [https://github.com/patterns-ai-core/langchainrb/pull/838] Allow setting safety_settings: [] in default_options for the Langchain::LLM::GoogleGemini and Langchain::LLM::GoogleVertexAI constructors
+- [BUGFIX] [https://github.com/patterns-ai-core/langchainrb/pull/871] Allow passing an options hash to Ollama
+
 ## [0.18.0] - 2024-10-12
 - [BREAKING] Remove the `Langchain::Assistant#clear_thread!` method
 - [BREAKING] The `Langchain::Messages::*` namespace has migrated to `Langchain::Assistant::Messages::*`
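The three 0.19.0 renames affect every Langchain::LLM constructor that accepted the old parameter names. A minimal before/after sketch (the model name is illustrative):

```ruby
require "langchain"

# Before 0.19.0
llm = Langchain::LLM::OpenAI.new(
  api_key: ENV["OPENAI_API_KEY"],
  default_options: {chat_completion_model_name: "gpt-4o"}
)

# From 0.19.0 on
llm = Langchain::LLM::OpenAI.new(
  api_key: ENV["OPENAI_API_KEY"],
  default_options: {chat_model: "gpt-4o"}
)
```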
data/README.md
CHANGED
@@ -86,7 +86,7 @@ Most LLM classes can be initialized with an API key and optional default options
 ```ruby
 llm = Langchain::LLM::OpenAI.new(
   api_key: ENV["OPENAI_API_KEY"],
-  default_options: { temperature: 0.7,
+  default_options: { temperature: 0.7, chat_model: "gpt-4o" }
 )
 ```
 
@@ -133,7 +133,7 @@ messages = [
   { role: "system", content: "You are a helpful assistant." },
   { role: "user", content: "What's the weather like today?" }
   # Google Gemini and Google VertexAI expect messages in a different format:
-  # { role: "user", parts: [{ text: "why is the sky blue?" }]
+  # { role: "user", parts: [{ text: "why is the sky blue?" }]}
 ]
 response = llm.chat(messages: messages)
 chat_completion = response.chat_completion
@@ -505,7 +505,7 @@ assistant.add_message_and_run!(content: "What's the latest news about AI?")
 # Supply an image to the assistant
 assistant.add_message_and_run!(
   content: "Show me a picture of a cat",
-
+  image_url: "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
 )
 
 # Access the conversation thread
@@ -558,7 +558,7 @@ Note that streaming is not currently supported for all LLMs.
 The Langchain::Assistant can be easily extended with custom tools by creating classes that `extend Langchain::ToolDefinition` module and implement required methods.
 ```ruby
 class MovieInfoTool
-
+  extend Langchain::ToolDefinition
 
   define_function :search_movie, description: "MovieInfoTool: Search for a movie by title" do
     property :query, type: "string", description: "The movie title to search for", required: true
data/lib/langchain/assistant/llm/adapter.rb
CHANGED
@@ -6,16 +6,17 @@ module Langchain
     # TODO: Fix the message truncation when context window is exceeded
     class Adapter
       def self.build(llm)
-
-        when Langchain::LLM::Anthropic
+        if llm.is_a?(Langchain::LLM::Anthropic)
           LLM::Adapters::Anthropic.new
-
+        elsif llm.is_a?(Langchain::LLM::AwsBedrock) && llm.defaults[:chat_model].include?("anthropic")
+          LLM::Adapters::AwsBedrockAnthropic.new
+        elsif llm.is_a?(Langchain::LLM::GoogleGemini) || llm.is_a?(Langchain::LLM::GoogleVertexAI)
           LLM::Adapters::GoogleGemini.new
-
+        elsif llm.is_a?(Langchain::LLM::MistralAI)
           LLM::Adapters::MistralAI.new
-
+        elsif llm.is_a?(Langchain::LLM::Ollama)
           LLM::Adapters::Ollama.new
-
+        elsif llm.is_a?(Langchain::LLM::OpenAI)
           LLM::Adapters::OpenAI.new
         else
           raise ArgumentError, "Unsupported LLM type: #{llm.class}"
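The refactor swaps a `case`/`when` on the LLM class for explicit `is_a?` checks so AWS Bedrock instances can be routed by their configured model. A rough sketch of the resulting dispatch, assuming an AWS Bedrock client is set up (the model id is illustrative):

```ruby
require "langchain"

llm = Langchain::LLM::AwsBedrock.new(
  default_options: {chat_model: "anthropic.claude-3-5-sonnet-20240620-v1:0"}
)

# Because the default chat_model contains "anthropic", the builder picks
# the Bedrock-specific Anthropic adapter rather than the plain one.
adapter = Langchain::Assistant::LLM::Adapter.build(llm)
adapter.class # => Langchain::Assistant::LLM::Adapters::AwsBedrockAnthropic
```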
data/lib/langchain/assistant/llm/adapters/anthropic.rb
CHANGED
@@ -38,9 +38,7 @@ module Langchain
       # @param tool_call_id [String] The tool call ID
       # @return [Messages::AnthropicMessage] The Anthropic message
       def build_message(role:, content: nil, image_url: nil, tool_calls: [], tool_call_id: nil)
-
-
-        Messages::AnthropicMessage.new(role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id)
+        Messages::AnthropicMessage.new(role: role, content: content, image_url: image_url, tool_calls: tool_calls, tool_call_id: tool_call_id)
       end
 
       # Extract the tool call information from the Anthropic tool call hash
data/lib/langchain/assistant/llm/adapters/aws_bedrock_anthropic.rb
ADDED
@@ -0,0 +1,35 @@
+# frozen_string_literal: true
+
+module Langchain
+  class Assistant
+    module LLM
+      module Adapters
+        class AwsBedrockAnthropic < Anthropic
+          private
+
+          # @param [String] choice
+          # @param [Boolean] _parallel_tool_calls
+          # @return [Hash]
+          def build_tool_choice(choice, _parallel_tool_calls)
+            # AWS Bedrock-hosted Anthropic does not support parallel tool calls
+            Langchain.logger.warn "WARNING: parallel_tool_calls is not supported by AWS Bedrock Anthropic currently"
+
+            tool_choice_object = {}
+
+            case choice
+            when "auto"
+              tool_choice_object[:type] = "auto"
+            when "any"
+              tool_choice_object[:type] = "any"
+            else
+              tool_choice_object[:type] = "tool"
+              tool_choice_object[:name] = choice
+            end
+
+            tool_choice_object
+          end
+        end
+      end
+    end
+  end
+end
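The subclass overrides only tool-choice serialization, since Bedrock-hosted Anthropic rejects the parallel-tool-calls flag. An illustrative sketch of the mapping (the tool name is an example; `send` is used here only because the method is private):

```ruby
adapter = Langchain::Assistant::LLM::Adapters::AwsBedrockAnthropic.new

adapter.send(:build_tool_choice, "auto", true)         # => {type: "auto"}
adapter.send(:build_tool_choice, "any", false)         # => {type: "any"}
adapter.send(:build_tool_choice, "get_weather", false) # => {type: "tool", name: "get_weather"}
# Each call also logs a warning that parallel_tool_calls is ignored.
```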
data/lib/langchain/assistant/llm/adapters/ollama.rb
CHANGED
@@ -39,9 +39,7 @@ module Langchain
       # @param tool_call_id [String] The tool call ID
       # @return [Messages::OllamaMessage] The Ollama message
       def build_message(role:, content: nil, image_url: nil, tool_calls: [], tool_call_id: nil)
-
-
-        Messages::OllamaMessage.new(role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id)
+        Messages::OllamaMessage.new(role: role, content: content, image_url: image_url, tool_calls: tool_calls, tool_call_id: tool_call_id)
       end
 
       # Extract the tool call information from the OpenAI tool call hash
data/lib/langchain/assistant/messages/anthropic_message.rb
CHANGED
@@ -12,13 +12,26 @@ module Langchain
 
         TOOL_ROLE = "tool_result"
 
-
+        # Initialize a new Anthropic message
+        #
+        # @param role [String] The role of the message
+        # @param content [String] The content of the message
+        # @param tool_calls [Array<Hash>] The tool calls made in the message
+        # @param tool_call_id [String] The ID of the tool call
+        def initialize(
+          role:,
+          content: nil,
+          image_url: nil,
+          tool_calls: [],
+          tool_call_id: nil
+        )
           raise ArgumentError, "Role must be one of #{ROLES.join(", ")}" unless ROLES.include?(role)
           raise ArgumentError, "Tool calls must be an array of hashes" unless tool_calls.is_a?(Array) && tool_calls.all? { |tool_call| tool_call.is_a?(Hash) }
 
           @role = role
           # Some Tools return content as a JSON hence `.to_s`
           @content = content.to_s
+          @image_url = image_url
           @tool_calls = tool_calls
           @tool_call_id = tool_call_id
         end
@@ -27,23 +40,82 @@ module Langchain
         #
         # @return [Hash] The message as an Anthropic API-compatible hash
         def to_hash
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+          if assistant?
+            assistant_hash
+          elsif tool?
+            tool_hash
+          elsif user?
+            user_hash
+          end
+        end
+
+        # Convert the message to an Anthropic API-compatible hash
+        #
+        # @return [Hash] The message as an Anthropic API-compatible hash, with the role as "assistant"
+        def assistant_hash
+          {
+            role: "assistant",
+            content: [
+              {
+                type: "text",
+                text: content
+              }
+            ].concat(tool_calls)
+          }
+        end
+
+        # Convert the message to an Anthropic API-compatible hash
+        #
+        # @return [Hash] The message as an Anthropic API-compatible hash, with the role as "user"
+        def tool_hash
+          {
+            role: "user",
+            # TODO: Tool can also return images
+            # https://docs.anthropic.com/en/docs/build-with-claude/tool-use#handling-tool-use-and-tool-result-content-blocks
+            content: [
+              {
+                type: "tool_result",
+                tool_use_id: tool_call_id,
+                content: content
+              }
+            ]
+          }
+        end
+
+        # Convert the message to an Anthropic API-compatible hash
+        #
+        # @return [Hash] The message as an Anthropic API-compatible hash, with the role as "user"
+        def user_hash
+          {
+            role: "user",
+            content: build_content_array
+          }
+        end
+
+        # Builds the content value for the message hash
+        # @return [Array<Hash>] An array of content hashes
+        def build_content_array
+          content_details = []
+
+          if content && !content.empty?
+            content_details << {
+              type: "text",
+              text: content
+            }
           end
+
+          if image
+            content_details << {
+              type: "image",
+              source: {
+                type: "base64",
+                data: image.base64,
+                media_type: image.mime_type
+              }
+            }
+          end
+
+          content_details
         end
 
         # Check if the message is a tool call
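With the per-role split above, a user message carrying both text and an image serializes into Anthropic's content-block format. A sketch of the shape `user_hash` produces (values illustrative; the base64 data and MIME type come from the image fetched for the message's image_url):

```ruby
{
  role: "user",
  content: [
    {type: "text", text: "What is in this picture?"},
    {
      type: "image",
      source: {type: "base64", data: "<base64-encoded bytes>", media_type: "image/jpeg"}
    }
  ]
}
```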
data/lib/langchain/assistant/messages/google_gemini_message.rb
CHANGED
@@ -15,10 +15,10 @@ module Langchain
 
         # Initialize a new Google Gemini message
         #
-        # @param [String] The role of the message
-        # @param [String] The content of the message
-        # @param [Array<Hash>] The tool calls made in the message
-        # @param [String] The ID of the tool call
+        # @param role [String] The role of the message
+        # @param content [String] The content of the message
+        # @param tool_calls [Array<Hash>] The tool calls made in the message
+        # @param tool_call_id [String] The ID of the tool call
         def initialize(role:, content: nil, tool_calls: [], tool_call_id: nil)
           raise ArgumentError, "Role must be one of #{ROLES.join(", ")}" unless ROLES.include?(role)
           raise ArgumentError, "Tool calls must be an array of hashes" unless tool_calls.is_a?(Array) && tool_calls.all? { |tool_call| tool_call.is_a?(Hash) }
@@ -41,23 +41,12 @@ module Langchain
         #
         # @return [Hash] The message as a Google Gemini API-compatible hash
         def to_hash
-
-
-
-
-
-
-              response: {
-                name: tool_call_id,
-                content: content
-              }
-            }
-          }]
-          elsif tool_calls.any?
-            tool_calls
-          else
-            [{text: content}]
-          end
+          if tool?
+            tool_hash
+          elsif model?
+            model_hash
+          elsif user?
+            user_hash
           end
         end
 
@@ -73,6 +62,13 @@ module Langchain
           function?
         end
 
+        # Check if the message is a user call
+        #
+        # @return [Boolean] true/false whether this message is a user call
+        def user?
+          role == "user"
+        end
+
         # Check if the message is a tool call
         #
         # @return [Boolean] true/false whether this message is a tool call
@@ -80,6 +76,51 @@ module Langchain
           role == "function"
         end
 
+        # Convert the message to a GoogleGemini API-compatible hash
+        # @return [Hash] The message as a GoogleGemini API-compatible hash, with the role as "model"
+        def model_hash
+          {
+            role: role,
+            parts: build_parts
+          }
+        end
+
+        # Convert the message to a GoogleGemini API-compatible hash
+        # @return [Hash] The message as a GoogleGemini API-compatible hash, with the role as "function"
+        def tool_hash
+          {
+            role: role,
+            parts: [{
+              functionResponse: {
+                name: tool_call_id,
+                response: {
+                  name: tool_call_id,
+                  content: content
+                }
+              }
+            }]
+          }
+        end
+
+        # Convert the message to a GoogleGemini API-compatible hash
+        # @return [Hash] The message as a GoogleGemini API-compatible hash, with the role as "user"
+        def user_hash
+          {
+            role: role,
+            parts: build_parts
+          }
+        end
+
+        # Builds the parts value for the message hash
+        # @return [Array<Hash>] An array of content hashes of the text, or of the tool calls if present
+        def build_parts
+          if tool_calls.any?
+            tool_calls
+          else
+            [{text: content}]
+          end
+        end
+
         # Check if the message came from an LLM
         #
         # @return [Boolean] true/false whether this message was produced by an LLM
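For tool results, `tool_hash` sets the tool_call_id as the functionResponse name and repeats it inside the response payload, per Gemini's functionResponse part format. An illustrative result (the function name and content are examples):

```ruby
{
  role: "function",
  parts: [{
    functionResponse: {
      name: "get_weather",
      response: {name: "get_weather", content: "{\"temperature\": 22}"}
    }
  }]
}
```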
data/lib/langchain/assistant/messages/mistral_ai_message.rb
CHANGED
@@ -45,30 +45,14 @@ module Langchain
         #
         # @return [Hash] The message as a MistralAI API-compatible hash
         def to_hash
-
-
-
-
-
-
-
-
-            h[:content] = []
-
-            if content && !content.empty?
-              h[:content] << {
-                type: "text",
-                text: content
-              }
-            end
-
-            if image_url
-              h[:content] << {
-                type: "image_url",
-                image_url: image_url
-              }
-            end
-          end
+          if assistant?
+            assistant_hash
+          elsif system?
+            system_hash
+          elsif tool?
+            tool_hash
+          elsif user?
+            user_hash
           end
         end
 
@@ -92,6 +76,67 @@ module Langchain
         def tool?
           role == "tool"
         end
+
+        # Convert the message to a MistralAI API-compatible hash
+        # @return [Hash] The message as a MistralAI API-compatible hash, with the role as "assistant"
+        def assistant_hash
+          {
+            role: "assistant",
+            content: content,
+            tool_calls: tool_calls,
+            prefix: false
+          }
+        end
+
+        # Convert the message to a MistralAI API-compatible hash
+        # @return [Hash] The message as a MistralAI API-compatible hash, with the role as "system"
+        def system_hash
+          {
+            role: "system",
+            content: build_content_array
+          }
+        end
+
+        # Convert the message to a MistralAI API-compatible hash
+        # @return [Hash] The message as a MistralAI API-compatible hash, with the role as "tool"
+        def tool_hash
+          {
+            role: "tool",
+            content: content,
+            tool_call_id: tool_call_id
+          }
+        end
+
+        # Convert the message to a MistralAI API-compatible hash
+        # @return [Hash] The message as a MistralAI API-compatible hash, with the role as "user"
+        def user_hash
+          {
+            role: "user",
+            content: build_content_array
+          }
+        end
+
+        # Builds the content value for the message hash
+        # @return [Array<Hash>] An array of content hashes, with keys :type and :text or :image_url.
+        def build_content_array
+          content_details = []
+
+          if content && !content.empty?
+            content_details << {
+              type: "text",
+              text: content
+            }
+          end
+
+          if image_url
+            content_details << {
+              type: "image_url",
+              image_url: image_url
+            }
+          end
+
+          content_details
+        end
       end
     end
   end
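Note that, unlike the Anthropic message, the MistralAI content array references images by URL instead of embedding base64 data. An illustrative `user_hash` result (the text and URL are examples):

```ruby
{
  role: "user",
  content: [
    {type: "text", text: "Describe this image"},
    {type: "image_url", image_url: "https://example.com/cat.jpg"}
  ]
}
```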
data/lib/langchain/assistant/messages/ollama_message.rb
CHANGED
@@ -16,17 +16,20 @@ module Langchain
 
         # Initialize a new OpenAI message
         #
-        # @param [String] The role of the message
-        # @param [String] The content of the message
-        # @param [
-        # @param [
-
+        # @param role [String] The role of the message
+        # @param content [String] The content of the message
+        # @param image_url [String] The URL of the image to include in the message
+        # @param tool_calls [Array<Hash>] The tool calls made in the message
+        # @param tool_call_id [String] The ID of the tool call
+        def initialize(role:, content: nil, image_url: nil, tool_calls: [], tool_call_id: nil)
           raise ArgumentError, "Role must be one of #{ROLES.join(", ")}" unless ROLES.include?(role)
           raise ArgumentError, "Tool calls must be an array of hashes" unless tool_calls.is_a?(Array) && tool_calls.all? { |tool_call| tool_call.is_a?(Hash) }
+          raise ArgumentError, "image_url must be a valid url" if image_url && !URI::DEFAULT_PARSER.make_regexp.match?(image_url)
 
           @role = role
           # Some Tools return content as a JSON hence `.to_s`
           @content = content.to_s
+          @image_url = image_url
           @tool_calls = tool_calls
           @tool_call_id = tool_call_id
         end
@@ -38,6 +41,7 @@ module Langchain
           {}.tap do |h|
             h[:role] = role
             h[:content] = content if content # Content is nil for tool calls
+            h[:images] = [image.base64] if image
             h[:tool_calls] = tool_calls if tool_calls.any?
             h[:tool_call_id] = tool_call_id if tool_call_id
           end
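Ollama expects images as an array of base64 strings alongside the plain-text content, so a message built with an image_url serializes roughly like this (values illustrative):

```ruby
{
  role: "user",
  content: "What is in this picture?",
  images: ["<base64-encoded bytes>"]
}
```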