langchainrb 0.15.5 → 0.15.6
- checksums.yaml +4 -4
- data/CHANGELOG.md +6 -1
- data/README.md +1 -1
- data/lib/langchain/assistants/assistant.rb +31 -8
- data/lib/langchain/llm/aws_bedrock.rb +15 -1
- data/lib/langchain/llm/google_vertex_ai.rb +0 -2
- data/lib/langchain/llm/response/aws_bedrock_meta_response.rb +29 -0
- data/lib/langchain/version.rb +1 -1
- metadata +3 -2
checksums.yaml
CHANGED
```diff
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: e5ee7b8ae1bd67027846f794a9d070ce4dcedc6e3c6c36eb169a42ba8a55f124
+  data.tar.gz: c5889ddebb21147551c33fe8be1df6ce1659018d6fb489579e497567fd744bf0
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: ef5d74c4e9f2a0e5f0f13b000dd5b82bbfa3ed9481d0bd1defce49d03da7071b31e02a5ec0237cce43b8ea8d76607541e59fe400d9db8803cf6961bb02b2f5bc
+  data.tar.gz: 01a816d66a313b387ce2b37789501636e7b7bede507bd61253bcea7cac3cd641fe74ab18498fe7ddff1bc5b8e38ec39bc0385912ae9e9f310cf94f4d5ec31e9c
```
data/CHANGELOG.md
CHANGED
```diff
@@ -1,6 +1,11 @@
 ## [Unreleased]
 
-## [0.15.5] - 2024-09-10 🇧🇦
+## [0.15.6] - 2024-09-16
+- Throw an error when `Langchain::Assistant#add_message_callback` is not a callable proc.
+- Resetting instructions on Langchain::Assistant with Google Gemini no longer throws an error.
+- Add Meta models support for AWS Bedrock LLM
+
+## [0.15.5] - 2024-09-10 🇧🇦
 - Fix for Langchain::Prompt::PromptTemplate supporting nested JSON data
 - Require common libs at top-level
 - Add `add_message_callback` to `Langchain::Assistant` constructor to invoke an optional function when any message is added to the conversation
```
data/README.md
CHANGED
```diff
@@ -421,7 +421,7 @@ assistant = Langchain::Assistant.new(
 )
 
 # Add a user message and run the assistant
-assistant.add_message_and_run(content: "What's the latest news about AI?")
+assistant.add_message_and_run!(content: "What's the latest news about AI?")
 
 # Access the conversation thread
 messages = assistant.messages
```
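A note on the bang variant: `add_message_and_run!` runs the assistant with automatic tool execution turned on. A minimal sketch of the presumed equivalence, assuming `add_message_and_run` still accepts the `auto_tool_execution:` keyword from earlier releases:

```ruby
# Presumed equivalent of the bang call above (auto_tool_execution: is an
# assumption based on the non-bang method's existing signature):
assistant.add_message_and_run(
  content: "What's the latest news about AI?",
  auto_tool_execution: true
)
```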
data/lib/langchain/assistants/assistant.rb
CHANGED

```diff
@@ -17,7 +17,7 @@ module Langchain
 
     attr_reader :llm, :thread, :instructions, :state, :llm_adapter, :tool_choice
     attr_reader :total_prompt_tokens, :total_completion_tokens, :total_tokens
-    attr_accessor :tools
+    attr_accessor :tools, :add_message_callback
 
     # Create a new assistant
     #
@@ -25,6 +25,8 @@ module Langchain
     # @param thread [Langchain::Thread] The thread that'll keep track of the conversation
     # @param tools [Array<Langchain::Tool::Base>] Tools that the assistant has access to
     # @param instructions [String] The system instructions to include in the thread
+    # @param tool_choice [String] Specify how tools should be selected. Options: "auto", "any", "none", or <specific function name>
+    # @param add_message_callback [Proc] A callback function (Proc or lambda) that is called when any message is added to the conversation
     def initialize(
       llm:,
       thread: nil,
@@ -41,6 +43,11 @@ module Langchain
       @llm_adapter = LLM::Adapter.build(llm)
 
       @thread = thread || Langchain::Thread.new
+
+      # TODO: Validate that it is, indeed, a Proc or lambda
+      if !add_message_callback.nil? && !add_message_callback.respond_to?(:call)
+        raise ArgumentError, "add_message_callback must be a callable object, like Proc or lambda"
+      end
       @thread.add_message_callback = add_message_callback
 
       @tools = tools
```
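The new guard rejects anything that doesn't respond to `#call`. A short sketch of both sides of the check (the OpenAI setup is illustrative):

```ruby
llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])

# A Proc or lambda is accepted and invoked for every message added to the thread:
assistant = Langchain::Assistant.new(
  llm: llm,
  instructions: "You are a helpful assistant",
  add_message_callback: ->(message) { puts "New message from: #{message.role}" }
)

# Anything that does not respond to #call now fails fast:
Langchain::Assistant.new(llm: llm, add_message_callback: "not callable")
# => ArgumentError: add_message_callback must be a callable object, like Proc or lambda
```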
```diff
@@ -157,19 +164,24 @@ module Langchain
 
     # Set new instructions
     #
-    # @param [String] New instructions that will be set as a system message
+    # @param new_instructions [String] New instructions that will be set as a system message
     # @return [Array<Langchain::Message>] The messages in the thread
     def instructions=(new_instructions)
       @instructions = new_instructions
 
-      # Find message with role: "system" in thread.messages and delete it from the thread.messages array
-      thread.messages.delete_if(&:system?)
-
-      # Set new instructions by adding new system message
-      message = build_message(role: "system", content: new_instructions)
-      thread.messages.unshift(message)
+      # This only needs to be done for LLMs that support Message#@role="system"
+      if !llm.is_a?(Langchain::LLM::GoogleGemini) &&
+          !llm.is_a?(Langchain::LLM::GoogleVertexAI) &&
+          !llm.is_a?(Langchain::LLM::Anthropic)
+        # Find message with role: "system" in thread.messages and delete it from the thread.messages array
+        replace_system_message!(content: new_instructions)
+      end
     end
 
+    # Set tool_choice, how tools should be selected
+    #
+    # @param new_tool_choice [String] Tool choice
+    # @return [String] Selected tool choice
     def tool_choice=(new_tool_choice)
       validate_tool_choice!(new_tool_choice)
       @tool_choice = new_tool_choice
```
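In effect, resetting instructions now rewrites the thread's system message only for providers that represent instructions as a `role: "system"` message; Gemini, Vertex AI, and Anthropic assistants just update the attribute, which is what fixes the changelog's "Resetting instructions ... no longer throws an error" item. A hedged sketch:

```ruby
# With Google Gemini, instructions are not stored as a system message in the
# thread, so the setter now skips the thread rewrite instead of raising:
llm = Langchain::LLM::GoogleGemini.new(api_key: ENV["GOOGLE_GEMINI_API_KEY"])
assistant = Langchain::Assistant.new(llm: llm, instructions: "You are a poet")

assistant.instructions = "You are a pirate" # previously threw an error
assistant.instructions                      # => "You are a pirate"
```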
```diff
@@ -177,6 +189,17 @@ module Langchain
 
     private
 
+    # Replace old system message with new one
+    #
+    # @param content [String] New system message content
+    # @return [Array<Langchain::Message>] The messages in the thread
+    def replace_system_message!(content:)
+      thread.messages.delete_if(&:system?)
+
+      message = build_message(role: "system", content: content)
+      thread.messages.unshift(message)
+    end
+
     # TODO: If tool_choice = "tool_function_name" and then tool is removed from the assistant, should we set tool_choice back to "auto"?
     def validate_tool_choice!(tool_choice)
       allowed_tool_choices = llm_adapter.allowed_tool_choices.concat(available_tool_names)
```
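For context, `validate_tool_choice!` checks the new value against the adapter's allowed choices plus the assistant's available tool function names. An illustrative sketch; the Calculator tool is real, but the exact error raised for an invalid value is an assumption here:

```ruby
assistant = Langchain::Assistant.new(
  llm: llm,
  tools: [Langchain::Tool::Calculator.new]
)

assistant.tool_choice = "auto"  # an allowed choice for the adapter
assistant.tool_choice = "bogus" # presumably raises, since "bogus" is neither an
                                # allowed choice nor an available tool name
```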
data/lib/langchain/llm/aws_bedrock.rb
CHANGED

```diff
@@ -48,7 +48,7 @@ module Langchain::LLM
 
     attr_reader :client, :defaults
 
-    SUPPORTED_COMPLETION_PROVIDERS = %i[anthropic cohere ai21].freeze
+    SUPPORTED_COMPLETION_PROVIDERS = %i[anthropic ai21 cohere meta].freeze
     SUPPORTED_CHAT_COMPLETION_PROVIDERS = %i[anthropic].freeze
     SUPPORTED_EMBEDDING_PROVIDERS = %i[amazon].freeze
 
@@ -209,6 +209,8 @@ module Langchain::LLM
         compose_parameters_cohere params
       elsif completion_provider == :ai21
         compose_parameters_ai21 params
+      elsif completion_provider == :meta
+        compose_parameters_meta params
       end
     end
 
@@ -219,6 +221,8 @@ module Langchain::LLM
         Langchain::LLM::CohereResponse.new(JSON.parse(response.body.string))
       elsif completion_provider == :ai21
         Langchain::LLM::AI21Response.new(JSON.parse(response.body.string, symbolize_names: true))
+      elsif completion_provider == :meta
+        Langchain::LLM::AwsBedrockMetaResponse.new(JSON.parse(response.body.string))
       end
     end
 
@@ -282,6 +286,16 @@ module Langchain::LLM
       }
     end
 
+    def compose_parameters_meta(params)
+      default_params = @defaults.merge(params)
+
+      {
+        temperature: default_params[:temperature],
+        top_p: default_params[:top_p],
+        max_gen_len: default_params[:max_tokens_to_sample]
+      }
+    end
+
     def response_from_chunks(chunks)
       raw_response = {}
 
```
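Together these hunks route Meta-model completions through `compose_parameters_meta` (note that the gem's `max_tokens_to_sample` option maps onto Meta's `max_gen_len`) and wrap the reply in `AwsBedrockMetaResponse`. A hedged usage sketch; the constructor option key and the Llama model ID below are illustrative, not taken from this diff:

```ruby
llm = Langchain::LLM::AwsBedrock.new(
  default_options: {
    completion_model_name: "meta.llama2-70b-chat-v1", # illustrative model ID
    max_tokens_to_sample: 256,                        # becomes max_gen_len for Meta
    temperature: 0.5
  }
)

response = llm.complete(prompt: "Explain RAG in one sentence.")
response.completion # => the "generation" field of the raw Bedrock payload
```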
data/lib/langchain/llm/google_vertex_ai.rb
CHANGED

```diff
@@ -88,8 +88,6 @@ module Langchain::LLM
     def chat(params = {})
       params[:system] = {parts: [{text: params[:system]}]} if params[:system]
       params[:tools] = {function_declarations: params[:tools]} if params[:tools]
-      # This throws an error when tool_choice is passed
-      params[:tool_choice] = {function_calling_config: {mode: params[:tool_choice].upcase}} if params[:tool_choice]
 
       raise ArgumentError.new("messages argument is required") if Array(params[:messages]).empty?
 
```
data/lib/langchain/llm/response/aws_bedrock_meta_response.rb
ADDED

```diff
@@ -0,0 +1,29 @@
+# frozen_string_literal: true
+
+module Langchain::LLM
+  class AwsBedrockMetaResponse < BaseResponse
+    def completion
+      completions.first
+    end
+
+    def completions
+      [raw_response.dig("generation")]
+    end
+
+    def stop_reason
+      raw_response.dig("stop_reason")
+    end
+
+    def prompt_tokens
+      raw_response.dig("prompt_token_count").to_i
+    end
+
+    def completion_tokens
+      raw_response.dig("generation_token_count").to_i
+    end
+
+    def total_tokens
+      prompt_tokens + completion_tokens
+    end
+  end
+end
```
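The wrapper maps the fields of Bedrock's Meta payload onto the gem's common response interface; a quick illustration with a made-up raw payload, assuming `BaseResponse` takes the parsed hash as its first argument:

```ruby
raw = {
  "generation" => "Hello there!",
  "stop_reason" => "stop",
  "prompt_token_count" => 12,
  "generation_token_count" => 4
}

response = Langchain::LLM::AwsBedrockMetaResponse.new(raw)
response.completion   # => "Hello there!"
response.stop_reason  # => "stop"
response.total_tokens # => 16 (12 prompt + 4 completion)
```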
data/lib/langchain/version.rb
CHANGED
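The body of this diff was lost in extraction; presumably it is the usual one-line version bump:

```diff
-  VERSION = "0.15.5"
+  VERSION = "0.15.6"
```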
metadata
CHANGED
```diff
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: langchainrb
 version: !ruby/object:Gem::Version
-  version: 0.15.5
+  version: 0.15.6
 platform: ruby
 authors:
 - Andrei Bondarev
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-09-10 00:00:00.000000000 Z
+date: 2024-09-16 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: baran
@@ -712,6 +712,7 @@ files:
 - lib/langchain/llm/replicate.rb
 - lib/langchain/llm/response/ai21_response.rb
 - lib/langchain/llm/response/anthropic_response.rb
+- lib/langchain/llm/response/aws_bedrock_meta_response.rb
 - lib/langchain/llm/response/aws_titan_response.rb
 - lib/langchain/llm/response/base_response.rb
 - lib/langchain/llm/response/cohere_response.rb
```