langchainrb 0.15.4 → 0.15.6

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 68d2d64fb264bf47488e83581540b88f7746f0e1b1b318d7dfc9c15356f40c8c
- data.tar.gz: efbd840632f0f22b202d9257a2020c4bc49cea7528d79efa7e05f963e9e4745f
+ metadata.gz: e5ee7b8ae1bd67027846f794a9d070ce4dcedc6e3c6c36eb169a42ba8a55f124
+ data.tar.gz: c5889ddebb21147551c33fe8be1df6ce1659018d6fb489579e497567fd744bf0
  SHA512:
- metadata.gz: f57817cc62de3af8f9aa80c62e421255e4172f55d54d91c1adc7d7a03ac9272e866670532c996a19d502c6d3110627d778efdad8c4faad0b07868c8b0aebc81c
- data.tar.gz: b35d82314edc0d747c87a37bd0e2036755f884e85977639e8b53063ca8ebd7002b9dd4ae9a5dd443f088552e575471e080dc9aa4519e25efaa908e16130b613a
+ metadata.gz: ef5d74c4e9f2a0e5f0f13b000dd5b82bbfa3ed9481d0bd1defce49d03da7071b31e02a5ec0237cce43b8ea8d76607541e59fe400d9db8803cf6961bb02b2f5bc
+ data.tar.gz: 01a816d66a313b387ce2b37789501636e7b7bede507bd61253bcea7cac3cd641fe74ab18498fe7ddff1bc5b8e38ec39bc0385912ae9e9f310cf94f4d5ec31e9c
data/CHANGELOG.md CHANGED
@@ -1,5 +1,16 @@
  ## [Unreleased]

+ ## [0.15.6] - 2024-09-16
+ - Throw an error when `Langchain::Assistant#add_message_callback` is not a callable proc.
+ - Resetting instructions on Langchain::Assistant with Google Gemini no longer throws an error.
+ - Add support for Meta models to the AWS Bedrock LLM
+
+ ## [0.15.5] - 2024-09-10 🇧🇦
+ - Fix Langchain::Prompt::PromptTemplate handling of nested JSON data
+ - Require common libs at top-level
+ - Add `add_message_callback` to the `Langchain::Assistant` constructor to invoke an optional function when any message is added to the conversation
+ - Add Assistant syntactic sugar: #run! and #add_message_and_run!
+
  ## [0.15.4] - 2024-08-30
  - Improve the Langchain::Tool::Database tool
  - Allow explicitly setting tool_choice on the Assistant instance
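
Taken together, the 0.15.5 and 0.15.6 Assistant changes look roughly like this in use. This is a minimal sketch: the OpenAI setup and the callback body are illustrative and not part of this diff; the bang methods and the `add_message_callback:` option come straight from the changes below.

```ruby
llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])

assistant = Langchain::Assistant.new(
  llm: llm,
  instructions: "You are a helpful assistant",
  # Called with every message added to the conversation (0.15.5)
  add_message_callback: ->(message) { puts "message added: #{message.inspect}" }
)

# The bang variants imply auto_tool_execution: true (0.15.5)
assistant.add_message_and_run!(content: "What's the latest news about AI?")
assistant.run!
```
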
data/README.md CHANGED
@@ -421,13 +421,13 @@ assistant = Langchain::Assistant.new(
  )

  # Add a user message and run the assistant
- assistant.add_message_and_run(content: "What's the latest news about AI?")
+ assistant.add_message_and_run!(content: "What's the latest news about AI?")

  # Access the conversation thread
  messages = assistant.messages

  # Run the assistant with automatic tool execution
- assistant.run(auto_tool_execution: true)
+ assistant.run!
  ```

  ### Configuration
@@ -435,11 +435,12 @@ assistant.run(auto_tool_execution: true)
  * `tools`: An array of tool instances (optional)
  * `instructions`: System instructions for the assistant (optional)
  * `tool_choice`: Specifies how tools should be selected. Default: "auto". A specific tool function name can be passed. This will force the Assistant to **always** use this function.
+ * `add_message_callback`: A callback function (proc, lambda) that is called when any message is added to the conversation (optional)

  ### Key Methods
  * `add_message`: Adds a user message to the messages array
- * `run`: Processes the conversation and generates responses
- * `add_message_and_run`: Combines adding a message and running the assistant
+ * `run!`: Processes the conversation and generates responses
+ * `add_message_and_run!`: Combines adding a message and running the assistant
  * `submit_tool_output`: Manually submit output to a tool call
  * `messages`: Returns a list of ongoing messages
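
A short sketch of how the new `add_message_callback` option is validated; the lambda body is illustrative, and the error message is the one raised by the constructor as of 0.15.6.

```ruby
# Accepted: anything that responds to #call (Proc or lambda)
assistant = Langchain::Assistant.new(
  llm: llm,
  add_message_callback: ->(message) { puts message.role }
)

# Rejected with ArgumentError:
#   "add_message_callback must be a callable object, like Proc or lambda"
Langchain::Assistant.new(llm: llm, add_message_callback: "not callable")
```
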
@@ -17,7 +17,7 @@ module Langchain

  attr_reader :llm, :thread, :instructions, :state, :llm_adapter, :tool_choice
  attr_reader :total_prompt_tokens, :total_completion_tokens, :total_tokens
- attr_accessor :tools
+ attr_accessor :tools, :add_message_callback

  # Create a new assistant
  #
@@ -25,12 +25,15 @@ module Langchain
  # @param thread [Langchain::Thread] The thread that'll keep track of the conversation
  # @param tools [Array<Langchain::Tool::Base>] Tools that the assistant has access to
  # @param instructions [String] The system instructions to include in the thread
+ # @param tool_choice [String] Specify how tools should be selected. Options: "auto", "any", "none", or <specific function name>
+ # @param add_message_callback [Proc] A callback function (Proc or lambda) that is called when any message is added to the conversation
  def initialize(
  llm:,
  thread: nil,
  tools: [],
  instructions: nil,
- tool_choice: "auto"
+ tool_choice: "auto",
+ add_message_callback: nil
  )
  unless tools.is_a?(Array) && tools.all? { |tool| tool.class.singleton_class.included_modules.include?(Langchain::ToolDefinition) }
  raise ArgumentError, "Tools must be an array of objects extending Langchain::ToolDefinition"
@@ -38,7 +41,15 @@ module Langchain

  @llm = llm
  @llm_adapter = LLM::Adapter.build(llm)
+
  @thread = thread || Langchain::Thread.new
+
+ # TODO: Validate that it is, indeed, a Proc or lambda
+ if !add_message_callback.nil? && !add_message_callback.respond_to?(:call)
+ raise ArgumentError, "add_message_callback must be a callable object, like Proc or lambda"
+ end
+ @thread.add_message_callback = add_message_callback
+
  @tools = tools
  self.tool_choice = tool_choice
  @instructions = instructions
@@ -51,9 +62,8 @@ module Langchain
  raise ArgumentError, "Thread must be an instance of Langchain::Thread" unless @thread.is_a?(Langchain::Thread)

  # The first message in the thread should be the system instructions
- # TODO: What if the user added old messages and the system instructions are already in there? Should this overwrite the existing instructions?
- initialize_instructions
  # For Google Gemini and Anthropic, system instructions are added to the `system:` param in the `chat` method
+ initialize_instructions
  end

  # Add a user message to the thread
@@ -107,6 +117,13 @@ module Langchain
  thread.messages
  end

+ # Run the assistant with automatic tool execution
+ #
+ # @return [Array<Langchain::Message>] The messages in the thread
+ def run!
+ run(auto_tool_execution: true)
+ end
+
  # Add a user message to the thread and run the assistant
  #
  # @param content [String] The content of the message
@@ -117,6 +134,14 @@ module Langchain
  run(auto_tool_execution: auto_tool_execution)
  end

+ # Add a user message to the thread and run the assistant with automatic tool execution
+ #
+ # @param content [String] The content of the message
+ # @return [Array<Langchain::Message>] The messages in the thread
+ def add_message_and_run!(content:)
+ add_message_and_run(content: content, auto_tool_execution: true)
+ end
+
  # Submit tool output to the thread
  #
  # @param tool_call_id [String] The ID of the tool call to submit output for
@@ -139,19 +164,24 @@ module Langchain

  # Set new instructions
  #
- # @param [String] New instructions that will be set as a system message
+ # @param new_instructions [String] New instructions that will be set as a system message
  # @return [Array<Langchain::Message>] The messages in the thread
  def instructions=(new_instructions)
  @instructions = new_instructions

- # Find message with role: "system" in thread.messages and delete it from the thread.messages array
- thread.messages.delete_if(&:system?)
-
- # Set new instructions by adding new system message
- message = build_message(role: "system", content: new_instructions)
- thread.messages.unshift(message)
+ # This only needs to be done for LLMs that support the "system" message role
+ if !llm.is_a?(Langchain::LLM::GoogleGemini) &&
+ !llm.is_a?(Langchain::LLM::GoogleVertexAI) &&
+ !llm.is_a?(Langchain::LLM::Anthropic)
+ # Find message with role: "system" in thread.messages and delete it from the thread.messages array
+ replace_system_message!(content: new_instructions)
+ end
  end

+ # Set tool_choice, how tools should be selected
+ #
+ # @param new_tool_choice [String] Tool choice
+ # @return [String] Selected tool choice
  def tool_choice=(new_tool_choice)
  validate_tool_choice!(new_tool_choice)
  @tool_choice = new_tool_choice
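
Resetting instructions remains a simple assignment; the difference introduced here is that Google Gemini, Google Vertex AI, and Anthropic no longer get a `system` message written into the thread, since they receive instructions through the `system:` chat parameter instead. A brief sketch (the instruction text is illustrative):

```ruby
assistant.instructions = "You are a meteorologist bot"

# OpenAI / Mistral AI: the thread's system message is replaced.
# Google Gemini, Google Vertex AI, Anthropic: the thread is left untouched
# and the instructions are passed via the `system:` parameter at chat time.
assistant.messages.any?(&:system?)
```
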
@@ -159,6 +189,17 @@ module Langchain

  private

+ # Replace old system message with new one
+ #
+ # @param content [String] New system message content
+ # @return [Array<Langchain::Message>] The messages in the thread
+ def replace_system_message!(content:)
+ thread.messages.delete_if(&:system?)
+
+ message = build_message(role: "system", content: content)
+ thread.messages.unshift(message)
+ end
+
  # TODO: If tool_choice = "tool_function_name" and then tool is removed from the assistant, should we set tool_choice back to "auto"?
  def validate_tool_choice!(tool_choice)
  allowed_tool_choices = llm_adapter.allowed_tool_choices.concat(available_tool_names)
@@ -287,7 +328,7 @@ module Langchain

  def initialize_instructions
  if llm.is_a?(Langchain::LLM::OpenAI) || llm.is_a?(Langchain::LLM::MistralAI)
- add_message(role: "system", content: instructions) if instructions
+ self.instructions = @instructions if @instructions
  end
  end

@@ -4,12 +4,14 @@ module Langchain
  # Langchain::Thread keeps track of messages in a conversation.
  # TODO: Add functionality to persist the thread to disk, DB, storage, etc.
  class Thread
- attr_accessor :messages
+ attr_accessor :messages, :add_message_callback

  # @param messages [Array<Langchain::Message>]
- def initialize(messages: [])
+ # @param add_message_callback [Proc] A callback to call when a message is added to the thread
+ def initialize(messages: [], add_message_callback: nil)
  raise ArgumentError, "messages array must only contain Langchain::Message instance(s)" unless messages.is_a?(Array) && messages.all? { |m| m.is_a?(Langchain::Messages::Base) }

+ @add_message_callback = add_message_callback
  @messages = messages
  end

@@ -34,6 +36,9 @@ module Langchain
  def add_message(message)
  raise ArgumentError, "message must be a Langchain::Message instance" unless message.is_a?(Langchain::Messages::Base)

+ # Call the callback with the message
+ add_message_callback.call(message) if add_message_callback # rubocop:disable Style/SafeNavigation
+
  # Append the message to the thread
  messages << message
  end
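
The same hook is available when constructing a `Langchain::Thread` directly; `add_message` invokes the callback just before appending. A sketch, with the message constructor call written from memory of the library and meant as illustration only:

```ruby
seen = []
thread = Langchain::Thread.new(add_message_callback: ->(message) { seen << message })

thread.add_message(Langchain::Messages::OpenAIMessage.new(role: "user", content: "Hi"))
seen.length #=> 1
```
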
@@ -48,7 +48,7 @@ module Langchain::LLM

  attr_reader :client, :defaults

- SUPPORTED_COMPLETION_PROVIDERS = %i[anthropic cohere ai21].freeze
+ SUPPORTED_COMPLETION_PROVIDERS = %i[anthropic ai21 cohere meta].freeze
  SUPPORTED_CHAT_COMPLETION_PROVIDERS = %i[anthropic].freeze
  SUPPORTED_EMBEDDING_PROVIDERS = %i[amazon].freeze

@@ -209,6 +209,8 @@ module Langchain::LLM
  compose_parameters_cohere params
  elsif completion_provider == :ai21
  compose_parameters_ai21 params
+ elsif completion_provider == :meta
+ compose_parameters_meta params
  end
  end

@@ -219,6 +221,8 @@ module Langchain::LLM
  Langchain::LLM::CohereResponse.new(JSON.parse(response.body.string))
  elsif completion_provider == :ai21
  Langchain::LLM::AI21Response.new(JSON.parse(response.body.string, symbolize_names: true))
+ elsif completion_provider == :meta
+ Langchain::LLM::AwsBedrockMetaResponse.new(JSON.parse(response.body.string))
  end
  end

@@ -282,6 +286,16 @@ module Langchain::LLM
  }
  end

+ def compose_parameters_meta(params)
+ default_params = @defaults.merge(params)
+
+ {
+ temperature: default_params[:temperature],
+ top_p: default_params[:top_p],
+ max_gen_len: default_params[:max_tokens_to_sample]
+ }
+ end
+
  def response_from_chunks(chunks)
  raw_response = {}

@@ -88,8 +88,6 @@ module Langchain::LLM
  def chat(params = {})
  params[:system] = {parts: [{text: params[:system]}]} if params[:system]
  params[:tools] = {function_declarations: params[:tools]} if params[:tools]
- # This throws an error when tool_choice is passed
- params[:tool_choice] = {function_calling_config: {mode: params[:tool_choice].upcase}} if params[:tool_choice]

  raise ArgumentError.new("messages argument is required") if Array(params[:messages]).empty?

@@ -0,0 +1,29 @@
+ # frozen_string_literal: true
+
+ module Langchain::LLM
+ class AwsBedrockMetaResponse < BaseResponse
+ def completion
+ completions.first
+ end
+
+ def completions
+ [raw_response.dig("generation")]
+ end
+
+ def stop_reason
+ raw_response.dig("stop_reason")
+ end
+
+ def prompt_tokens
+ raw_response.dig("prompt_token_count").to_i
+ end
+
+ def completion_tokens
+ raw_response.dig("generation_token_count").to_i
+ end
+
+ def total_tokens
+ prompt_tokens + completion_tokens
+ end
+ end
+ end
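
For reference, a sketch of what the new response wrapper extracts from a Meta completion payload; the payload values here are fabricated:

```ruby
raw = {
  "generation" => "Paris is the capital of France.",
  "stop_reason" => "stop",
  "prompt_token_count" => 11,
  "generation_token_count" => 8
}

response = Langchain::LLM::AwsBedrockMetaResponse.new(raw)
response.completion    #=> "Paris is the capital of France."
response.stop_reason   #=> "stop"
response.total_tokens  #=> 19
```
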
@@ -1,6 +1,5 @@
  # frozen_string_literal: true

- require "json"
  require "json-schema"

  module Langchain::OutputParsers
@@ -1,4 +1,4 @@
- require "uri"
+ # frozen_string_literal: true

  module Langchain
  module Processors
@@ -1,7 +1,6 @@
  # frozen_string_literal: true

  require "strscan"
- require "json"
  require "yaml"

  module Langchain::Prompt
@@ -2,7 +2,6 @@

  require "strscan"
  require "pathname"
- require "json"
  require "yaml"

  module Langchain::Prompt
@@ -58,8 +58,9 @@ module Langchain::Prompt
  #
  def format(**kwargs)
  result = @template
+ result = result.gsub(/{{/, "{").gsub(/}}/, "}")
  kwargs.each { |key, value| result = result.gsub(/\{#{key}\}/, value.to_s) }
- result.gsub(/{{/, "{").gsub(/}}/, "}")
+ result
  end

  #
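
With this reordering, the `{{`/`}}` escapes are resolved before variable substitution, so a template that wraps JSON in double braces renders cleanly. A small sketch; the template and values are illustrative:

```ruby
prompt = Langchain::Prompt::PromptTemplate.new(
  template: '{{"city": "{city}", "units": "{units}"}}',
  input_variables: ["city", "units"]
)

prompt.format(city: "Paris", units: "metric")
#=> '{"city": "Paris", "units": "metric"}'
```
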
@@ -1,7 +1,5 @@
  # frozen_string_literal: true

- require "json"
-
  #
  # Extends a class to be used as a tool in the assistant.
  # A tool is a collection of functions (methods) used to perform specific tasks.
@@ -42,8 +40,8 @@ module Langchain::ToolDefinition
  # @param method_name [Symbol] Name of the method to define
  # @param description [String] Description of the function
  # @yield Block that defines the parameters for the function
- def define_function(method_name, description:, &)
- function_schemas.add_function(method_name:, description:, &)
+ def define_function(method_name, description:, &block)
+ function_schemas.add_function(method_name:, description:, &block)
  end

  # Returns the FunctionSchemas instance for this tool
@@ -76,11 +74,11 @@ module Langchain::ToolDefinition
  # @param description [String] Description of the function
  # @yield Block that defines the parameters for the function
  # @raise [ArgumentError] If a block is defined and no parameters are specified for the function
- def add_function(method_name:, description:, &)
+ def add_function(method_name:, description:, &block)
  name = "#{@tool_name}__#{method_name}"

- if block_given?
- parameters = ParameterBuilder.new(parent_type: "object").build(&)
+ if block_given? # rubocop:disable Performance/BlockGivenWithExplicitBlock
+ parameters = ParameterBuilder.new(parent_type: "object").build(&block)

  if parameters[:properties].empty?
  raise ArgumentError, "Function parameters must have at least one property defined within it, if a block is provided"
@@ -130,8 +128,8 @@ module Langchain::ToolDefinition
  #
  # @yield Block that defines the properties of the schema
  # @return [Hash] The built schema
- def build(&)
- instance_eval(&)
+ def build(&block)
+ instance_eval(&block)
  @schema
  end

@@ -144,13 +142,13 @@ module Langchain::ToolDefinition
  # @param required [Boolean] Whether the property is required
  # @yield [Block] Block for nested properties (only for object and array types)
  # @raise [ArgumentError] If any parameter is invalid
- def property(name = nil, type:, description: nil, enum: nil, required: false, &)
+ def property(name = nil, type:, description: nil, enum: nil, required: false, &block)
  validate_parameters(name:, type:, enum:, required:)

  prop = {type:, description:, enum:}.compact

- if block_given?
- nested_schema = ParameterBuilder.new(parent_type: type).build(&)
+ if block_given? # rubocop:disable Performance/BlockGivenWithExplicitBlock
+ nested_schema = ParameterBuilder.new(parent_type: type).build(&block)

  case type
  when "object"
@@ -1,9 +1,7 @@
  # frozen_string_literal: true

  require "securerandom"
- require "json"
  require "timeout"
- require "uri"

  module Langchain::Vectorsearch
  class Epsilla < Base
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module Langchain
- VERSION = "0.15.4"
+ VERSION = "0.15.6"
  end
data/lib/langchain.rb CHANGED
@@ -4,6 +4,9 @@ require "logger"
  require "pathname"
  require "rainbow"
  require "zeitwerk"
+ require "uri"
+ require "json"
+
  loader = Zeitwerk::Loader.for_gem
  loader.ignore("#{__dir__}/langchainrb.rb")
  loader.inflector.inflect(
@@ -30,6 +33,7 @@ loader.collapse("#{__dir__}/langchain/assistants")

  loader.collapse("#{__dir__}/langchain/tool/calculator")
  loader.collapse("#{__dir__}/langchain/tool/database")
+ loader.collapse("#{__dir__}/langchain/tool/docs_tool")
  loader.collapse("#{__dir__}/langchain/tool/file_system")
  loader.collapse("#{__dir__}/langchain/tool/google_search")
  loader.collapse("#{__dir__}/langchain/tool/ruby_code_interpreter")
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: langchainrb
  version: !ruby/object:Gem::Version
- version: 0.15.4
+ version: 0.15.6
  platform: ruby
  authors:
  - Andrei Bondarev
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2024-08-30 00:00:00.000000000 Z
+ date: 2024-09-16 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: baran
@@ -712,6 +712,7 @@ files:
  - lib/langchain/llm/replicate.rb
  - lib/langchain/llm/response/ai21_response.rb
  - lib/langchain/llm/response/anthropic_response.rb
+ - lib/langchain/llm/response/aws_bedrock_meta_response.rb
  - lib/langchain/llm/response/aws_titan_response.rb
  - lib/langchain/llm/response/base_response.rb
  - lib/langchain/llm/response/cohere_response.rb