langchainrb 0.19.0 → 0.19.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 89fdd6c82d689f6e7057133eab08ef8367e72e211d3a59df423ce26cd3b2c04d
- data.tar.gz: e77493aec62198a014b0296490779b18016407c1808de45020e25b63a8d42d17
+ metadata.gz: 86b8bd9a7b846fb7be528116984e3e6353ea36ee75f06ddae8872c9b70c8deee
+ data.tar.gz: 3cc860f9d448d5e7df4eef2ea7e136e2381f3aecee4ffd245ebd41df7b9bd650
  SHA512:
- metadata.gz: 9040755869f6cbf666f8ec1225b976d095a809915c533e8102b281bc8c66d4751e93abbef19a4c27d806e5abd9555690a4d89401e7d06c29d8e4e9b6252bff4f
- data.tar.gz: 23287eb713d76ed824e13e4b2e07e66f342a08177b39b7fa2d6fb85af25f3f0ed2bc898d6b1316c2859963526f7cf1e635c413a8786ae671399ff50b87697f82
+ metadata.gz: 55128bb86766b2fb3e852764603df1786df971f736328f115581666eb7779249b0236058edf31a53a8197929496b2d1d5f667c26e04ff759a6763028d892735a
+ data.tar.gz: '028c8049dd2d2642d4652dfe1df94d7f94a4acfadb5cf61e46b2ac82a392022eafde5e5be8820f3919c1ffe1c73693d56b09bf60fbcdb1e207ae3abcec5abaaa'
data/CHANGELOG.md CHANGED
@@ -4,13 +4,25 @@
  - [BREAKING]: A breaking change. After an upgrade, your app may need modifications to keep working correctly.
  - [FEATURE]: A non-breaking improvement to the app. Either introduces new functionality, or improves on an existing feature.
  - [BUGFIX]: Fixes a bug with a non-breaking change.
- - [COMPAT]: Compatibility improvements - changes to make Administrate more compatible with different dependency versions.
+ - [COMPAT]: Compatibility improvements - changes to make Langchain.rb more compatible with different dependency versions.
  - [OPTIM]: Optimization or performance increase.
  - [DOCS]: Documentation changes. No changes to the library's behavior.
  - [SECURITY]: A change which fixes a security vulnerability.
 
  ## [Unreleased]
 
+ ## [0.19.2] - 2024-11-26
+ - [FEATURE] [https://github.com/patterns-ai-core/langchainrb/pull/884] Add `tool_execution_callback` to `Langchain::Assistant`, a callback function (proc, lambda) that is called right before a tool is executed
+
+ ## [0.19.1] - 2024-11-21
+ - [FEATURE] [https://github.com/patterns-ai-core/langchainrb/pull/858] Assistant, when using Anthropic, now also accepts image_url in the message.
+ - [FEATURE] [https://github.com/patterns-ai-core/langchainrb/pull/861] Clean up passing `max_tokens` to Anthropic constructor and chat method
+ - [FEATURE] [https://github.com/patterns-ai-core/langchainrb/pull/849] Langchain::Assistant now works with AWS Bedrock-hosted Anthropic models
+ - [OPTIM] [https://github.com/patterns-ai-core/langchainrb/pull/867] Refactor `GoogleGeminiMessage#to_hash` and `OpenAIMessage#to_hash` methods.
+ - [OPTIM] [https://github.com/patterns-ai-core/langchainrb/pull/849] Simplify Langchain::LLM::AwsBedrock class
+ - [BUGFIX] [https://github.com/patterns-ai-core/langchainrb/pull/869] AnthropicMessage now correctly handles tool calls with content.
+ - [OPTIM] [https://github.com/patterns-ai-core/langchainrb/pull/870] Assistant, when using Ollama (e.g.: llava model), now also accepts image_url in the message.
+
  ## [0.19.0] - 2024-10-23
  - [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/840] Rename `chat_completion_model_name` parameter to `chat_model` in Langchain::LLM parameters.
  - [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/840] Rename `completion_model_name` parameter to `completion_model` in Langchain::LLM parameters.
@@ -19,6 +31,7 @@
  - [BUGFIX] [https://github.com/patterns-ai-core/langchainrb/pull/837] Fix bug when tool functions with no input variables are used with Langchain::LLM::Anthropic
  - [BUGFIX] [https://github.com/patterns-ai-core/langchainrb/pull/836] Fix bug when assistant.instructions = nil did not remove the system message
  - [FEATURE] [https://github.com/patterns-ai-core/langchainrb/pull/838] Allow setting safety_settings: [] in default_options for Langchain::LLM::GoogleGemini and Langchain::LLM::GoogleVertexAI constructors
+ - [BUGFIX] [https://github.com/patterns-ai-core/langchainrb/pull/871] Allow passing in options hash to Ollama
 
  ## [0.18.0] - 2024-10-12
  - [BREAKING] Remove `Langchain::Assistant#clear_thread!` method
data/README.md CHANGED
@@ -133,7 +133,7 @@ messages = [
    { role: "system", content: "You are a helpful assistant." },
    { role: "user", content: "What's the weather like today?" }
    # Google Gemini and Google VertexAI expect messages in a different format:
-   # { role: "user", parts: [{ text: "why is the sky blue?" }]
+   # { role: "user", parts: [{ text: "why is the sky blue?" }]}
  ]
  response = llm.chat(messages: messages)
  chat_completion = response.chat_completion
@@ -536,6 +536,13 @@ Note that streaming is not currently supported for all LLMs.
  * `tool_choice`: Specifies how tools should be selected. Default: "auto". A specific tool function name can be passed. This will force the Assistant to **always** use this function.
  * `parallel_tool_calls`: Whether to make multiple parallel tool calls. Default: true
  * `add_message_callback`: A callback function (proc, lambda) that is called when any message is added to the conversation (optional)
+ ```ruby
+ assistant.add_message_callback = -> (message) { puts "New message: #{message}" }
+ ```
+ * `tool_execution_callback`: A callback function (proc, lambda) that is called right before a tool is executed (optional)
+ ```ruby
+ assistant.tool_execution_callback = -> (tool_call_id, tool_name, method_name, tool_arguments) { puts "Executing tool_call_id: #{tool_call_id}, tool_name: #{tool_name}, method_name: #{method_name}, tool_arguments: #{tool_arguments}" }
+ ```
 
  ### Key Methods
  * `add_message`: Adds a user message to the messages array
@@ -558,7 +565,7 @@ Note that streaming is not currently supported for all LLMs.
  The Langchain::Assistant can be easily extended with custom tools by creating classes that `extend Langchain::ToolDefinition` module and implement required methods.
  ```ruby
  class MovieInfoTool
-   include Langchain::ToolDefinition
+   extend Langchain::ToolDefinition
 
    define_function :search_movie, description: "MovieInfoTool: Search for a movie by title" do
      property :query, type: "string", description: "The movie title to search for", required: true
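For orientation, a hedged end-to-end sketch of the two callback options documented above; it assumes an OpenAI key in `ENV["OPENAI_API_KEY"]` and uses a trivial hypothetical tool (not part of the gem) in place of `MovieInfoTool`:

```ruby
require "langchain"

# A minimal hypothetical tool, following the MovieInfoTool pattern from the README
class ClockTool
  extend Langchain::ToolDefinition

  define_function :now, description: "ClockTool: Returns the current UTC time"

  def now
    Time.now.utc.to_s
  end
end

assistant = Langchain::Assistant.new(
  llm: Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"]),
  instructions: "Use the clock tool when asked about the time.",
  tools: [ClockTool.new],
  # Fires whenever a message is appended to the conversation
  add_message_callback: ->(message) { puts "New message: #{message.role}" },
  # New in 0.19.2: fires right before each tool function runs
  tool_execution_callback: ->(tool_call_id, tool_name, method_name, tool_arguments) {
    puts "Running #{tool_name}##{method_name} (#{tool_call_id}) with #{tool_arguments}"
  }
)

assistant.add_message_and_run!(content: "What time is it?")
```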
data/lib/langchain/assistant/llm/adapter.rb CHANGED
@@ -6,16 +6,17 @@ module Langchain
    # TODO: Fix the message truncation when context window is exceeded
    class Adapter
      def self.build(llm)
-       case llm
-       when Langchain::LLM::Anthropic
+       if llm.is_a?(Langchain::LLM::Anthropic)
          LLM::Adapters::Anthropic.new
-       when Langchain::LLM::GoogleGemini, Langchain::LLM::GoogleVertexAI
+       elsif llm.is_a?(Langchain::LLM::AwsBedrock) && llm.defaults[:chat_model].include?("anthropic")
+         LLM::Adapters::AwsBedrockAnthropic.new
+       elsif llm.is_a?(Langchain::LLM::GoogleGemini) || llm.is_a?(Langchain::LLM::GoogleVertexAI)
          LLM::Adapters::GoogleGemini.new
-       when Langchain::LLM::MistralAI
+       elsif llm.is_a?(Langchain::LLM::MistralAI)
          LLM::Adapters::MistralAI.new
-       when Langchain::LLM::Ollama
+       elsif llm.is_a?(Langchain::LLM::Ollama)
          LLM::Adapters::Ollama.new
-       when Langchain::LLM::OpenAI
+       elsif llm.is_a?(Langchain::LLM::OpenAI)
          LLM::Adapters::OpenAI.new
        else
          raise ArgumentError, "Unsupported LLM type: #{llm.class}"
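The move from `case/when` to an explicit `is_a?` chain lets the new Bedrock branch dispatch on the instance's `defaults`, not just its class. A hedged sketch of the resulting behavior (assumes the `aws-sdk-bedrockruntime` gem and AWS credentials are configured, and a local Ollama server for the second call):

```ruby
llm = Langchain::LLM::AwsBedrock.new(
  default_options: {chat_model: "anthropic.claude-3-5-sonnet-20240620-v1:0"}
)

Langchain::Assistant::LLM::Adapter.build(llm).class
# => Langchain::Assistant::LLM::Adapters::AwsBedrockAnthropic

# Every other supported LLM still maps by class alone:
Langchain::Assistant::LLM::Adapter.build(
  Langchain::LLM::Ollama.new(url: "http://localhost:11434")
).class
# => Langchain::Assistant::LLM::Adapters::Ollama
```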
data/lib/langchain/assistant/llm/adapters/anthropic.rb CHANGED
@@ -38,9 +38,7 @@ module Langchain
        # @param tool_call_id [String] The tool call ID
        # @return [Messages::AnthropicMessage] The Anthropic message
        def build_message(role:, content: nil, image_url: nil, tool_calls: [], tool_call_id: nil)
-         Langchain.logger.warn "WARNING: Image URL is not supported by Anthropic currently" if image_url
-
-         Messages::AnthropicMessage.new(role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id)
+         Messages::AnthropicMessage.new(role: role, content: content, image_url: image_url, tool_calls: tool_calls, tool_call_id: tool_call_id)
        end
 
        # Extract the tool call information from the Anthropic tool call hash
data/lib/langchain/assistant/llm/adapters/aws_bedrock_anthropic.rb ADDED
@@ -0,0 +1,35 @@
+ # frozen_string_literal: true
+
+ module Langchain
+   class Assistant
+     module LLM
+       module Adapters
+         class AwsBedrockAnthropic < Anthropic
+           private
+
+           # @param [String] choice
+           # @param [Boolean] _parallel_tool_calls
+           # @return [Hash]
+           def build_tool_choice(choice, _parallel_tool_calls)
+             # Aws Bedrock hosted Anthropic does not support parallel tool calls
+             Langchain.logger.warn "WARNING: parallel_tool_calls is not supported by AWS Bedrock Anthropic currently"
+
+             tool_choice_object = {}
+
+             case choice
+             when "auto"
+               tool_choice_object[:type] = "auto"
+             when "any"
+               tool_choice_object[:type] = "any"
+             else
+               tool_choice_object[:type] = "tool"
+               tool_choice_object[:name] = choice
+             end
+
+             tool_choice_object
+           end
+         end
+       end
+     end
+   end
+ end
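The private `build_tool_choice` override maps the Assistant's `tool_choice` setting onto the object format Bedrock-hosted Anthropic expects; reading the case statement directly:

```ruby
# Illustrative input => output pairs for build_tool_choice
# (the second argument is ignored, since parallel tool calls are unsupported):
# "auto"               => {type: "auto"}
# "any"                => {type: "any"}
# "movie_info__search" => {type: "tool", name: "movie_info__search"} # forces one specific function
```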
data/lib/langchain/assistant/llm/adapters/ollama.rb CHANGED
@@ -39,9 +39,7 @@ module Langchain
        # @param tool_call_id [String] The tool call ID
        # @return [Messages::OllamaMessage] The Ollama message
        def build_message(role:, content: nil, image_url: nil, tool_calls: [], tool_call_id: nil)
-         Langchain.logger.warn "WARNING: Image URL is not supported by Ollama currently" if image_url
-
-         Messages::OllamaMessage.new(role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id)
+         Messages::OllamaMessage.new(role: role, content: content, image_url: image_url, tool_calls: tool_calls, tool_call_id: tool_call_id)
        end
 
        # Extract the tool call information from the OpenAI tool call hash
data/lib/langchain/assistant/messages/anthropic_message.rb CHANGED
@@ -18,13 +18,20 @@ module Langchain
        # @param content [String] The content of the message
        # @param tool_calls [Array<Hash>] The tool calls made in the message
        # @param tool_call_id [String] The ID of the tool call
-       def initialize(role:, content: nil, tool_calls: [], tool_call_id: nil)
+       def initialize(
+         role:,
+         content: nil,
+         image_url: nil,
+         tool_calls: [],
+         tool_call_id: nil
+       )
          raise ArgumentError, "Role must be one of #{ROLES.join(", ")}" unless ROLES.include?(role)
          raise ArgumentError, "Tool calls must be an array of hashes" unless tool_calls.is_a?(Array) && tool_calls.all? { |tool_call| tool_call.is_a?(Hash) }
 
          @role = role
          # Some Tools return content as a JSON hence `.to_s`
          @content = content.to_s
+         @image_url = image_url
          @tool_calls = tool_calls
          @tool_call_id = tool_call_id
        end
@@ -33,25 +40,84 @@ module Langchain
        #
        # @return [Hash] The message as an Anthropic API-compatible hash
        def to_hash
-         {}.tap do |h|
-           h[:role] = tool? ? "user" : role
-
-           h[:content] = if tool?
-             [
-               {
-                 type: "tool_result",
-                 tool_use_id: tool_call_id,
-                 content: content
-               }
-             ]
-           elsif tool_calls.any?
-             tool_calls
-           else
-             content
-           end
+         if assistant?
+           assistant_hash
+         elsif tool?
+           tool_hash
+         elsif user?
+           user_hash
          end
        end
 
+       # Convert the message to an Anthropic API-compatible hash
+       #
+       # @return [Hash] The message as an Anthropic API-compatible hash, with the role as "assistant"
+       def assistant_hash
+         {
+           role: "assistant",
+           content: [
+             {
+               type: "text",
+               text: content
+             }
+           ].concat(tool_calls)
+         }
+       end
+
+       # Convert the message to an Anthropic API-compatible hash
+       #
+       # @return [Hash] The message as an Anthropic API-compatible hash, with the role as "user"
+       def tool_hash
+         {
+           role: "user",
+           # TODO: Tool can also return images
+           # https://docs.anthropic.com/en/docs/build-with-claude/tool-use#handling-tool-use-and-tool-result-content-blocks
+           content: [
+             {
+               type: "tool_result",
+               tool_use_id: tool_call_id,
+               content: content
+             }
+           ]
+         }
+       end
+
+       # Convert the message to an Anthropic API-compatible hash
+       #
+       # @return [Hash] The message as an Anthropic API-compatible hash, with the role as "user"
+       def user_hash
+         {
+           role: "user",
+           content: build_content_array
+         }
+       end
+
+       # Builds the content value for the message hash
+       # @return [Array<Hash>] An array of content hashes
+       def build_content_array
+         content_details = []
+
+         if content && !content.empty?
+           content_details << {
+             type: "text",
+             text: content
+           }
+         end
+
+         if image
+           content_details << {
+             type: "image",
+             source: {
+               type: "base64",
+               data: image.base64,
+               media_type: image.mime_type
+             }
+           }
+         end
+
+         content_details
+       end
+
        # Check if the message is a tool call
        #
        # @return [Boolean] true/false whether this message is a tool call
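Splitting `to_hash` by role makes the emitted shapes easier to see; it also carries the PR #869 fix, since an assistant message now keeps its text content alongside its tool calls. An illustrative sketch (tool-call hash abridged):

```ruby
message = Langchain::Assistant::Messages::AnthropicMessage.new(
  role: "assistant",
  content: "Let me look that up.",
  tool_calls: [{"type" => "tool_use", "id" => "toolu_01", "name" => "weather__fetch", "input" => {"city" => "Boston"}}]
)

message.to_hash
# => {role: "assistant",
#     content: [{type: "text", text: "Let me look that up."},
#               {"type" => "tool_use", "id" => "toolu_01", ...}]}
```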
data/lib/langchain/assistant/messages/base.rb CHANGED
@@ -50,6 +50,10 @@ module Langchain
          # TODO: Should we return :unknown or raise an error?
          :unknown
        end
+
+       def image
+         image_url ? Utils::ImageWrapper.new(image_url) : nil
+       end
      end
    end
  end
data/lib/langchain/assistant/messages/google_gemini_message.rb CHANGED
@@ -41,23 +41,12 @@ module Langchain
        #
        # @return [Hash] The message as a Google Gemini API-compatible hash
        def to_hash
-         {}.tap do |h|
-           h[:role] = role
-           h[:parts] = if function?
-             [{
-               functionResponse: {
-                 name: tool_call_id,
-                 response: {
-                   name: tool_call_id,
-                   content: content
-                 }
-               }
-             }]
-           elsif tool_calls.any?
-             tool_calls
-           else
-             [{text: content}]
-           end
+         if tool?
+           tool_hash
+         elsif model?
+           model_hash
+         elsif user?
+           user_hash
          end
        end
 
@@ -73,6 +62,13 @@ module Langchain
          function?
        end
 
+       # Check if the message is a user call
+       #
+       # @return [Boolean] true/false whether this message is a user call
+       def user?
+         role == "user"
+       end
+
        # Check if the message is a tool call
        #
        # @return [Boolean] true/false whether this message is a tool call
@@ -80,6 +76,51 @@ module Langchain
          role == "function"
        end
 
+       # Convert the message to an GoogleGemini API-compatible hash
+       # @return [Hash] The message as an GoogleGemini API-compatible hash, with the role as "model"
+       def model_hash
+         {
+           role: role,
+           parts: build_parts
+         }
+       end
+
+       # Convert the message to an GoogleGemini API-compatible hash
+       # @return [Hash] The message as an GoogleGemini API-compatible hash, with the role as "function"
+       def tool_hash
+         {
+           role: role,
+           parts: [{
+             functionResponse: {
+               name: tool_call_id,
+               response: {
+                 name: tool_call_id,
+                 content: content
+               }
+             }
+           }]
+         }
+       end
+
+       # Convert the message to an GoogleGemini API-compatible hash
+       # @return [Hash] The message as an GoogleGemini API-compatible hash, with the role as "user"
+       def user_hash
+         {
+           role: role,
+           parts: build_parts
+         }
+       end
+
+       # Builds the part value for the message hash
+       # @return [Array<Hash>] An array of content hashes of the text or of the tool calls if present
+       def build_parts
+         if tool_calls.any?
+           tool_calls
+         else
+           [{text: content}]
+         end
+       end
+
        # Check if the message came from an LLM
        #
        # @return [Boolean] true/false whether this message was produced by an LLM
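This is a pure refactor of `GoogleGeminiMessage#to_hash` (PR #867): the emitted hashes are unchanged, only routed through per-role helpers. For instance:

```ruby
message = Langchain::Assistant::Messages::GoogleGeminiMessage.new(
  role: "user",
  content: "why is the sky blue?"
)

message.to_hash
# => {role: "user", parts: [{text: "why is the sky blue?"}]}
```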
data/lib/langchain/assistant/messages/ollama_message.rb CHANGED
@@ -18,15 +18,18 @@ module Langchain
        #
        # @param role [String] The role of the message
        # @param content [String] The content of the message
+       # @param image_url [String] The URL of the image to include in the message
        # @param tool_calls [Array<Hash>] The tool calls made in the message
        # @param tool_call_id [String] The ID of the tool call
-       def initialize(role:, content: nil, tool_calls: [], tool_call_id: nil)
+       def initialize(role:, content: nil, image_url: nil, tool_calls: [], tool_call_id: nil)
          raise ArgumentError, "Role must be one of #{ROLES.join(", ")}" unless ROLES.include?(role)
          raise ArgumentError, "Tool calls must be an array of hashes" unless tool_calls.is_a?(Array) && tool_calls.all? { |tool_call| tool_call.is_a?(Hash) }
+         raise ArgumentError, "image_url must be a valid url" if image_url && !URI::DEFAULT_PARSER.make_regexp.match?(image_url)
 
          @role = role
          # Some Tools return content as a JSON hence `.to_s`
          @content = content.to_s
+         @image_url = image_url
          @tool_calls = tool_calls
          @tool_call_id = tool_call_id
        end
@@ -38,6 +41,7 @@ module Langchain
        {}.tap do |h|
          h[:role] = role
          h[:content] = content if content # Content is nil for tool calls
+         h[:images] = [image.base64] if image
          h[:tool_calls] = tool_calls if tool_calls.any?
          h[:tool_call_id] = tool_call_id if tool_call_id
        end
data/lib/langchain/assistant/messages/openai_message.rb CHANGED
@@ -50,32 +50,14 @@ module Langchain
        #
        # @return [Hash] The message as an OpenAI API-compatible hash
        def to_hash
-         {}.tap do |h|
-           h[:role] = role
-
-           if tool_calls.any?
-             h[:tool_calls] = tool_calls
-           else
-             h[:tool_call_id] = tool_call_id if tool_call_id
-
-             h[:content] = []
-
-             if content && !content.empty?
-               h[:content] << {
-                 type: "text",
-                 text: content
-               }
-             end
-
-             if image_url
-               h[:content] << {
-                 type: "image_url",
-                 image_url: {
-                   url: image_url
-                 }
-               }
-             end
-           end
+         if assistant?
+           assistant_hash
+         elsif system?
+           system_hash
+         elsif tool?
+           tool_hash
+         elsif user?
+           user_hash
          end
        end
 
@@ -99,6 +81,76 @@ module Langchain
        def tool?
          role == "tool"
        end
+
+       def user?
+         role == "user"
+       end
+
+       # Convert the message to an OpenAI API-compatible hash
+       # @return [Hash] The message as an OpenAI API-compatible hash, with the role as "assistant"
+       def assistant_hash
+         if tool_calls.any?
+           {
+             role: "assistant",
+             tool_calls: tool_calls
+           }
+         else
+           {
+             role: "assistant",
+             content: build_content_array
+           }
+         end
+       end
+
+       # Convert the message to an OpenAI API-compatible hash
+       # @return [Hash] The message as an OpenAI API-compatible hash, with the role as "system"
+       def system_hash
+         {
+           role: "system",
+           content: build_content_array
+         }
+       end
+
+       # Convert the message to an OpenAI API-compatible hash
+       # @return [Hash] The message as an OpenAI API-compatible hash, with the role as "tool"
+       def tool_hash
+         {
+           role: "tool",
+           tool_call_id: tool_call_id,
+           content: build_content_array
+         }
+       end
+
+       # Convert the message to an OpenAI API-compatible hash
+       # @return [Hash] The message as an OpenAI API-compatible hash, with the role as "user"
+       def user_hash
+         {
+           role: "user",
+           content: build_content_array
+         }
+       end
+
+       # Builds the content value for the message hash
+       # @return [Array<Hash>] An array of content hashes, with keys :type and :text or :image_url.
+       def build_content_array
+         content_details = []
+         if content && !content.empty?
+           content_details << {
+             type: "text",
+             text: content
+           }
+         end
+
+         if image_url
+           content_details << {
+             type: "image_url",
+             image_url: {
+               url: image_url
+             }
+           }
+         end
+         content_details
+       end
      end
    end
  end
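After this refactor, `content` is emitted as an array of typed parts for the user, system, and tool roles, matching the earlier behavior but through per-role helpers. Illustratively:

```ruby
message = Langchain::Assistant::Messages::OpenAIMessage.new(
  role: "user",
  content: "Describe this image",
  image_url: "https://example.com/image.jpg"
)

message.to_hash
# => {role: "user",
#     content: [{type: "text", text: "Describe this image"},
#               {type: "image_url", image_url: {url: "https://example.com/image.jpg"}}]}
```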
data/lib/langchain/assistant.rb CHANGED
@@ -24,6 +24,7 @@ module Langchain
 
    attr_accessor :tools,
      :add_message_callback,
+     :tool_execution_callback,
      :parallel_tool_calls
 
    # Create a new assistant
@@ -35,6 +36,7 @@ module Langchain
    # @param parallel_tool_calls [Boolean] Whether or not to run tools in parallel
    # @param messages [Array<Langchain::Assistant::Messages::Base>] The messages
    # @param add_message_callback [Proc] A callback function (Proc or lambda) that is called when any message is added to the conversation
+   # @param tool_execution_callback [Proc] A callback function (Proc or lambda) that is called right before a tool function is executed
    def initialize(
      llm:,
      tools: [],
@@ -42,7 +44,9 @@ module Langchain
      tool_choice: "auto",
      parallel_tool_calls: true,
      messages: [],
+     # Callbacks
      add_message_callback: nil,
+     tool_execution_callback: nil,
      &block
    )
      unless tools.is_a?(Array) && tools.all? { |tool| tool.class.singleton_class.included_modules.include?(Langchain::ToolDefinition) }
@@ -52,11 +56,8 @@ module Langchain
      @llm = llm
      @llm_adapter = LLM::Adapter.build(llm)
 
-     # TODO: Validate that it is, indeed, a Proc or lambda
-     if !add_message_callback.nil? && !add_message_callback.respond_to?(:call)
-       raise ArgumentError, "add_message_callback must be a callable object, like Proc or lambda"
-     end
-     @add_message_callback = add_message_callback
+     @add_message_callback = add_message_callback if validate_callback!("add_message_callback", add_message_callback)
+     @tool_execution_callback = tool_execution_callback if validate_callback!("tool_execution_callback", tool_execution_callback)
 
      self.messages = messages
      @tools = tools
@@ -353,16 +354,26 @@ module Langchain
    def run_tools(tool_calls)
      # Iterate over each function invocation and submit tool output
      tool_calls.each do |tool_call|
-       tool_call_id, tool_name, method_name, tool_arguments = @llm_adapter.extract_tool_call_args(tool_call: tool_call)
+       run_tool(tool_call)
+     end
+   end
 
-       tool_instance = tools.find do |t|
-         t.class.tool_name == tool_name
-       end or raise ArgumentError, "Tool: #{tool_name} not found in assistant.tools"
+   # Run the tool call
+   #
+   # @param tool_call [Hash] The tool call to run
+   # @return [Object] The result of the tool call
+   def run_tool(tool_call)
+     tool_call_id, tool_name, method_name, tool_arguments = @llm_adapter.extract_tool_call_args(tool_call: tool_call)
 
-       output = tool_instance.send(method_name, **tool_arguments)
+     tool_instance = tools.find do |t|
+       t.class.tool_name == tool_name
+     end or raise ArgumentError, "Tool: #{tool_name} not found in assistant.tools"
 
-       submit_tool_output(tool_call_id: tool_call_id, output: output)
-     end
+     # Call the callback if set
+     tool_execution_callback.call(tool_call_id, tool_name, method_name, tool_arguments) if tool_execution_callback # rubocop:disable Style/SafeNavigation
+     output = tool_instance.send(method_name, **tool_arguments)
+
+     submit_tool_output(tool_call_id: tool_call_id, output: output)
    end
 
    # Build a message
@@ -392,5 +403,13 @@ module Langchain
    def available_tool_names
      llm_adapter.available_tool_names(tools)
    end
+
+   def validate_callback!(attr_name, callback)
+     if !callback.nil? && !callback.respond_to?(:call)
+       raise ArgumentError, "#{attr_name} must be a callable object, like Proc or lambda"
+     end
+
+     true
+   end
  end
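Because `tool_execution_callback` fires inside `run_tool` just before `tool_instance.send`, it is a natural hook for auditing or timing tool use. A hedged sketch, given an `assistant` built as in the README (the `tool_execution_callback` setter comes from the `attr_accessor` above):

```ruby
audit_log = []

assistant.tool_execution_callback = lambda do |tool_call_id, tool_name, method_name, tool_arguments|
  # Record every tool invocation before it runs
  audit_log << {
    id: tool_call_id,
    tool: tool_name,
    method: method_name,
    args: tool_arguments,
    at: Time.now
  }
end
```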
data/lib/langchain/llm/anthropic.rb CHANGED
@@ -15,14 +15,14 @@ module Langchain::LLM
      temperature: 0.0,
      completion_model: "claude-2.1",
      chat_model: "claude-3-5-sonnet-20240620",
-     max_tokens_to_sample: 256
+     max_tokens: 256
    }.freeze
 
    # Initialize an Anthropic LLM instance
    #
    # @param api_key [String] The API key to use
    # @param llm_options [Hash] Options to pass to the Anthropic client
-   # @param default_options [Hash] Default options to use on every call to LLM, e.g.: { temperature:, completion_model:, chat_model:, max_tokens_to_sample: }
+   # @param default_options [Hash] Default options to use on every call to LLM, e.g.: { temperature:, completion_model:, chat_model:, max_tokens: }
    # @return [Langchain::LLM::Anthropic] Langchain::LLM::Anthropic instance
    def initialize(api_key:, llm_options: {}, default_options: {})
      depends_on "anthropic"
@@ -32,7 +32,7 @@ module Langchain::LLM
      chat_parameters.update(
        model: {default: @defaults[:chat_model]},
        temperature: {default: @defaults[:temperature]},
-       max_tokens: {default: @defaults[:max_tokens_to_sample]},
+       max_tokens: {default: @defaults[:max_tokens]},
        metadata: {},
        system: {}
      )
@@ -55,7 +55,7 @@ module Langchain::LLM
    def complete(
      prompt:,
      model: @defaults[:completion_model],
-     max_tokens_to_sample: @defaults[:max_tokens_to_sample],
+     max_tokens: @defaults[:max_tokens],
      stop_sequences: nil,
      temperature: @defaults[:temperature],
      top_p: nil,
@@ -64,12 +64,12 @@ module Langchain::LLM
      stream: nil
    )
      raise ArgumentError.new("model argument is required") if model.empty?
-     raise ArgumentError.new("max_tokens_to_sample argument is required") if max_tokens_to_sample.nil?
+     raise ArgumentError.new("max_tokens argument is required") if max_tokens.nil?
 
      parameters = {
        model: model,
        prompt: prompt,
-       max_tokens_to_sample: max_tokens_to_sample,
+       max_tokens_to_sample: max_tokens,
        temperature: temperature
      }
      parameters[:stop_sequences] = stop_sequences if stop_sequences
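After this cleanup (PR #861), callers configure `max_tokens` everywhere; the legacy Anthropic wire name `max_tokens_to_sample` survives only inside `#complete`'s request body. A hedged sketch, assuming an Anthropic key in ENV:

```ruby
llm = Langchain::LLM::Anthropic.new(
  api_key: ENV["ANTHROPIC_API_KEY"],
  default_options: {max_tokens: 512} # was max_tokens_to_sample before 0.19.1
)

# Per-call override, routed through chat_parameters:
llm.chat(messages: [{role: "user", content: "Hello!"}], max_tokens: 256)
```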
data/lib/langchain/llm/aws_bedrock.rb CHANGED
@@ -7,51 +7,40 @@ module Langchain::LLM
    # gem 'aws-sdk-bedrockruntime', '~> 1.1'
    #
    # Usage:
-   #     llm = Langchain::LLM::AwsBedrock.new(llm_options: {})
+   #     llm = Langchain::LLM::AwsBedrock.new(default_options: {})
    #
    class AwsBedrock < Base
      DEFAULTS = {
-       chat_model: "anthropic.claude-v2",
-       completion_model: "anthropic.claude-v2",
+       chat_model: "anthropic.claude-3-5-sonnet-20240620-v1:0",
+       completion_model: "anthropic.claude-v2:1",
        embedding_model: "amazon.titan-embed-text-v1",
        max_tokens_to_sample: 300,
        temperature: 1,
        top_k: 250,
        top_p: 0.999,
        stop_sequences: ["\n\nHuman:"],
-       anthropic_version: "bedrock-2023-05-31",
-       return_likelihoods: "NONE",
-       count_penalty: {
-         scale: 0,
-         apply_to_whitespaces: false,
-         apply_to_punctuations: false,
-         apply_to_numbers: false,
-         apply_to_stopwords: false,
-         apply_to_emojis: false
-       },
-       presence_penalty: {
-         scale: 0,
-         apply_to_whitespaces: false,
-         apply_to_punctuations: false,
-         apply_to_numbers: false,
-         apply_to_stopwords: false,
-         apply_to_emojis: false
-       },
-       frequency_penalty: {
-         scale: 0,
-         apply_to_whitespaces: false,
-         apply_to_punctuations: false,
-         apply_to_numbers: false,
-         apply_to_stopwords: false,
-         apply_to_emojis: false
-       }
+       return_likelihoods: "NONE"
      }.freeze
 
      attr_reader :client, :defaults
 
-     SUPPORTED_COMPLETION_PROVIDERS = %i[anthropic ai21 cohere meta].freeze
-     SUPPORTED_CHAT_COMPLETION_PROVIDERS = %i[anthropic].freeze
-     SUPPORTED_EMBEDDING_PROVIDERS = %i[amazon cohere].freeze
+     SUPPORTED_COMPLETION_PROVIDERS = %i[
+       anthropic
+       ai21
+       cohere
+       meta
+     ].freeze
+
+     SUPPORTED_CHAT_COMPLETION_PROVIDERS = %i[
+       anthropic
+       ai21
+       mistral
+     ].freeze
+
+     SUPPORTED_EMBEDDING_PROVIDERS = %i[
+       amazon
+       cohere
+     ].freeze
 
      def initialize(aws_client_options: {}, default_options: {})
        depends_on "aws-sdk-bedrockruntime", req: "aws-sdk-bedrockruntime"
@@ -64,8 +53,7 @@ module Langchain::LLM
        temperature: {},
        max_tokens: {default: @defaults[:max_tokens_to_sample]},
        metadata: {},
-       system: {},
-       anthropic_version: {default: "bedrock-2023-05-31"}
+       system: {}
      )
      chat_parameters.ignore(:n, :user)
      chat_parameters.remap(stop: :stop_sequences)
@@ -100,23 +88,25 @@ module Langchain::LLM
      # @param params extra parameters passed to Aws::BedrockRuntime::Client#invoke_model
      # @return [Langchain::LLM::AnthropicResponse], [Langchain::LLM::CohereResponse] or [Langchain::LLM::AI21Response] Response object
      #
-     def complete(prompt:, **params)
-       raise "Completion provider #{completion_provider} is not supported." unless SUPPORTED_COMPLETION_PROVIDERS.include?(completion_provider)
+     def complete(
+       prompt:,
+       model: @defaults[:completion_model],
+       **params
+     )
+       raise "Completion provider #{model} is not supported." unless SUPPORTED_COMPLETION_PROVIDERS.include?(provider_name(model))
 
-       raise "Model #{@defaults[:completion_model]} only supports #chat." if @defaults[:completion_model].include?("claude-3")
-
-       parameters = compose_parameters params
+       parameters = compose_parameters(params, model)
 
        parameters[:prompt] = wrap_prompt prompt
 
        response = client.invoke_model({
-         model_id: @defaults[:completion_model],
+         model_id: model,
          body: parameters.to_json,
         content_type: "application/json",
         accept: "application/json"
       })
 
-       parse_response response
+       parse_response(response, model)
      end
 
      # Generate a chat completion for a given prompt
@@ -137,10 +127,11 @@ module Langchain::LLM
      # @return [Langchain::LLM::AnthropicResponse] Response object
      def chat(params = {}, &block)
        parameters = chat_parameters.to_params(params)
+       parameters = compose_parameters(parameters, parameters[:model])
 
-       raise ArgumentError.new("messages argument is required") if Array(parameters[:messages]).empty?
-
-       raise "Model #{parameters[:model]} does not support chat completions." unless Langchain::LLM::AwsBedrock::SUPPORTED_CHAT_COMPLETION_PROVIDERS.include?(completion_provider)
+       unless SUPPORTED_CHAT_COMPLETION_PROVIDERS.include?(provider_name(parameters[:model]))
+         raise "Chat provider #{parameters[:model]} is not supported."
+       end
 
        if block
          response_chunks = []
@@ -168,12 +159,26 @@ module Langchain::LLM
          accept: "application/json"
        })
 
-       parse_response response
+       parse_response(response, parameters[:model])
      end
    end
 
    private
 
+   def parse_model_id(model_id)
+     model_id
+       .gsub("us.", "") # Meta append "us." to their model ids
+       .split(".")
+   end
+
+   def provider_name(model_id)
+     parse_model_id(model_id).first.to_sym
+   end
+
+   def model_name(model_id)
+     parse_model_id(model_id).last
+   end
+
    def completion_provider
      @defaults[:completion_model].split(".").first.to_sym
    end
@@ -200,15 +205,17 @@ module Langchain::LLM
      end
    end
 
-   def compose_parameters(params)
-     if completion_provider == :anthropic
-       compose_parameters_anthropic params
-     elsif completion_provider == :cohere
-       compose_parameters_cohere params
-     elsif completion_provider == :ai21
-       compose_parameters_ai21 params
-     elsif completion_provider == :meta
-       compose_parameters_meta params
+   def compose_parameters(params, model_id)
+     if provider_name(model_id) == :anthropic
+       compose_parameters_anthropic(params)
+     elsif provider_name(model_id) == :cohere
+       compose_parameters_cohere(params)
+     elsif provider_name(model_id) == :ai21
+       params
+     elsif provider_name(model_id) == :meta
+       params
+     elsif provider_name(model_id) == :mistral
+       params
      end
    end
 
@@ -220,15 +227,17 @@ module Langchain::LLM
      end
    end
 
-   def parse_response(response)
-     if completion_provider == :anthropic
+   def parse_response(response, model_id)
+     if provider_name(model_id) == :anthropic
        Langchain::LLM::AnthropicResponse.new(JSON.parse(response.body.string))
-     elsif completion_provider == :cohere
+     elsif provider_name(model_id) == :cohere
        Langchain::LLM::CohereResponse.new(JSON.parse(response.body.string))
-     elsif completion_provider == :ai21
+     elsif provider_name(model_id) == :ai21
        Langchain::LLM::AI21Response.new(JSON.parse(response.body.string, symbolize_names: true))
-     elsif completion_provider == :meta
+     elsif provider_name(model_id) == :meta
        Langchain::LLM::AwsBedrockMetaResponse.new(JSON.parse(response.body.string))
+     elsif provider_name(model_id) == :mistral
+       Langchain::LLM::MistralAIResponse.new(JSON.parse(response.body.string))
      end
    end
 
@@ -276,61 +285,7 @@ module Langchain::LLM
      end
 
      def compose_parameters_anthropic(params)
-       default_params = @defaults.merge(params)
-
-       {
-         max_tokens_to_sample: default_params[:max_tokens_to_sample],
-         temperature: default_params[:temperature],
-         top_k: default_params[:top_k],
-         top_p: default_params[:top_p],
-         stop_sequences: default_params[:stop_sequences],
-         anthropic_version: default_params[:anthropic_version]
-       }
-     end
-
-     def compose_parameters_ai21(params)
-       default_params = @defaults.merge(params)
-
-       {
-         maxTokens: default_params[:max_tokens_to_sample],
-         temperature: default_params[:temperature],
-         topP: default_params[:top_p],
-         stopSequences: default_params[:stop_sequences],
-         countPenalty: {
-           scale: default_params[:count_penalty][:scale],
-           applyToWhitespaces: default_params[:count_penalty][:apply_to_whitespaces],
-           applyToPunctuations: default_params[:count_penalty][:apply_to_punctuations],
-           applyToNumbers: default_params[:count_penalty][:apply_to_numbers],
-           applyToStopwords: default_params[:count_penalty][:apply_to_stopwords],
-           applyToEmojis: default_params[:count_penalty][:apply_to_emojis]
-         },
-         presencePenalty: {
-           scale: default_params[:presence_penalty][:scale],
-           applyToWhitespaces: default_params[:presence_penalty][:apply_to_whitespaces],
-           applyToPunctuations: default_params[:presence_penalty][:apply_to_punctuations],
-           applyToNumbers: default_params[:presence_penalty][:apply_to_numbers],
-           applyToStopwords: default_params[:presence_penalty][:apply_to_stopwords],
-           applyToEmojis: default_params[:presence_penalty][:apply_to_emojis]
-         },
-         frequencyPenalty: {
-           scale: default_params[:frequency_penalty][:scale],
-           applyToWhitespaces: default_params[:frequency_penalty][:apply_to_whitespaces],
-           applyToPunctuations: default_params[:frequency_penalty][:apply_to_punctuations],
-           applyToNumbers: default_params[:frequency_penalty][:apply_to_numbers],
-           applyToStopwords: default_params[:frequency_penalty][:apply_to_stopwords],
-           applyToEmojis: default_params[:frequency_penalty][:apply_to_emojis]
-         }
-       }
-     end
-
-     def compose_parameters_meta(params)
-       default_params = @defaults.merge(params)
-
-       {
-         temperature: default_params[:temperature],
-         top_p: default_params[:top_p],
-         max_gen_len: default_params[:max_tokens_to_sample]
-       }
+       params.merge(anthropic_version: "bedrock-2023-05-31")
      end
 
      def response_from_chunks(chunks)
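The new `parse_model_id`/`provider_name` helpers derive the provider from the model id itself rather than from the configured default, so per-call `model:` overrides route through the right parameter composer and response parser. Reading the helpers directly:

```ruby
# Illustrative provider_name results (first dot-separated segment, symbolized):
# "anthropic.claude-3-5-sonnet-20240620-v1:0" => :anthropic
# "us.meta.llama3-2-90b-instruct-v1:0"        => :meta   # the "us." prefix is stripped first
# "mistral.mistral-large-2402-v1:0"           => :mistral
# "cohere.command-text-v14"                   => :cohere
```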
data/lib/langchain/llm/ollama.rb CHANGED
@@ -14,7 +14,8 @@ module Langchain::LLM
      temperature: 0.0,
      completion_model: "llama3.1",
      embedding_model: "llama3.1",
-     chat_model: "llama3.1"
+     chat_model: "llama3.1",
+     options: {}
    }.freeze
 
    EMBEDDING_SIZES = {
@@ -45,7 +46,8 @@ module Langchain::LLM
      temperature: {default: @defaults[:temperature]},
      template: {},
      stream: {default: false},
-     response_format: {default: @defaults[:response_format]}
+     response_format: {default: @defaults[:response_format]},
+     options: {default: @defaults[:options]}
    )
    chat_parameters.remap(response_format: :format)
  end
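With `options` registered as a chat parameter (PR #871), Ollama's model-level knobs such as `num_ctx` can be set as defaults or per call. A hedged sketch against a local Ollama server:

```ruby
llm = Langchain::LLM::Ollama.new(
  url: "http://localhost:11434",
  default_options: {chat_model: "llama3.1", options: {num_ctx: 8192}}
)

# Per-call override, merged in through chat_parameters:
llm.chat(
  messages: [{role: "user", content: "Summarize RFC 2119 in one line."}],
  options: {temperature: 0.2}
)
```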
data/lib/langchain/llm/parameters/chat.rb CHANGED
@@ -37,7 +37,10 @@ module Langchain::LLM::Parameters
      parallel_tool_calls: {},
 
      # Additional optional parameters
-     logit_bias: {}
+     logit_bias: {},
+
+     # Additional llm options. Ollama only.
+     options: {}
    }
 
    def initialize(parameters: {})
data/lib/langchain/llm/response/ai21_response.rb CHANGED
@@ -9,5 +9,25 @@ module Langchain::LLM
      def completion
        completions.dig(0, :data, :text)
      end
+
+     def chat_completion
+       raw_response.dig(:choices, 0, :message, :content)
+     end
+
+     def prompt_tokens
+       raw_response.dig(:usage, :prompt_tokens).to_i
+     end
+
+     def completion_tokens
+       raw_response.dig(:usage, :completion_tokens).to_i
+     end
+
+     def total_tokens
+       raw_response.dig(:usage, :total_tokens).to_i
+     end
+
+     def role
+       raw_response.dig(:choices, 0, :message, :role)
+     end
    end
  end
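These new readers support the Bedrock-hosted AI21 chat path above; the `dig` calls assume the response was parsed with symbolized keys, as `AwsBedrock#parse_response` does for `:ai21`. An illustrative shape:

```ruby
# Illustrative raw_response shape the new readers expect:
# {
#   choices: [{message: {role: "assistant", content: "..."}}],
#   usage: {prompt_tokens: 10, completion_tokens: 5, total_tokens: 15}
# }
```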
data/lib/langchain/utils/image_wrapper.rb ADDED
@@ -0,0 +1,37 @@
+ # frozen_string_literal: true
+
+ require "open-uri"
+ require "base64"
+
+ module Langchain
+   module Utils
+     class ImageWrapper
+       attr_reader :image_url
+
+       def initialize(image_url)
+         @image_url = image_url
+       end
+
+       def base64
+         @base64 ||= begin
+           image_data = open_image.read
+           Base64.strict_encode64(image_data)
+         end
+       end
+
+       def mime_type
+         # TODO: Make it work with local files
+         open_image.meta["content-type"]
+       end
+
+       private
+
+       def open_image
+         # TODO: Make it work with local files
+         uri = URI.parse(image_url)
+         raise URI::InvalidURIError, "Invalid URL scheme" unless %w[http https].include?(uri.scheme)
+         @open_image ||= URI.open(image_url) # rubocop:disable Security/Open
+       end
+     end
+   end
+ end
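Usage is straightforward, with the caveats in the TODOs: only remote http/https URLs are supported, and the first accessor call performs a network fetch. A hedged sketch with a placeholder URL:

```ruby
image = Langchain::Utils::ImageWrapper.new("https://example.com/cat.jpg")

image.base64    # => Base64-encoded image bytes, fetched over HTTP and memoized
image.mime_type # => e.g. "image/jpeg", read from the response's Content-Type header
```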
data/lib/langchain/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true
 
  module Langchain
-   VERSION = "0.19.0"
+   VERSION = "0.19.2"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: langchainrb
  version: !ruby/object:Gem::Version
-   version: 0.19.0
+   version: 0.19.2
  platform: ruby
  authors:
  - Andrei Bondarev
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2024-10-23 00:00:00.000000000 Z
+ date: 2024-11-26 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: baran
@@ -640,6 +640,7 @@ files:
  - lib/langchain/assistant.rb
  - lib/langchain/assistant/llm/adapter.rb
  - lib/langchain/assistant/llm/adapters/anthropic.rb
+ - lib/langchain/assistant/llm/adapters/aws_bedrock_anthropic.rb
  - lib/langchain/assistant/llm/adapters/base.rb
  - lib/langchain/assistant/llm/adapters/google_gemini.rb
  - lib/langchain/assistant/llm/adapters/mistral_ai.rb
@@ -736,6 +737,7 @@ files:
  - lib/langchain/tool_definition.rb
  - lib/langchain/utils/cosine_similarity.rb
  - lib/langchain/utils/hash_transformer.rb
+ - lib/langchain/utils/image_wrapper.rb
  - lib/langchain/utils/to_boolean.rb
  - lib/langchain/vectorsearch/base.rb
  - lib/langchain/vectorsearch/chroma.rb