langchainrb 0.14.0 → 0.15.0
- checksums.yaml +4 -4
- data/CHANGELOG.md +8 -0
- data/lib/langchain/assistants/assistant.rb +175 -131
- data/lib/langchain/assistants/messages/ollama_message.rb +9 -21
- data/lib/langchain/contextual_logger.rb +2 -2
- data/lib/langchain/llm/google_gemini.rb +1 -1
- data/lib/langchain/llm/ollama.rb +23 -17
- data/lib/langchain/llm/openai.rb +1 -1
- data/lib/langchain/llm/response/ollama_response.rb +1 -15
- data/lib/langchain/llm/unified_parameters.rb +2 -2
- data/lib/langchain/tool/calculator.rb +38 -0
- data/lib/langchain/tool/{database/database.rb → database.rb} +24 -12
- data/lib/langchain/tool/file_system.rb +44 -0
- data/lib/langchain/tool/{google_search/google_search.rb → google_search.rb} +17 -23
- data/lib/langchain/tool/{news_retriever/news_retriever.rb → news_retriever.rb} +41 -14
- data/lib/langchain/tool/ruby_code_interpreter.rb +41 -0
- data/lib/langchain/tool/{tavily/tavily.rb → tavily.rb} +24 -10
- data/lib/langchain/tool/vectorsearch.rb +40 -0
- data/lib/langchain/tool/{weather/weather.rb → weather.rb} +21 -17
- data/lib/langchain/tool/{wikipedia/wikipedia.rb → wikipedia.rb} +17 -13
- data/lib/langchain/tool_definition.rb +212 -0
- data/lib/langchain/utils/hash_transformer.rb +9 -17
- data/lib/langchain/vectorsearch/chroma.rb +2 -2
- data/lib/langchain/vectorsearch/elasticsearch.rb +2 -2
- data/lib/langchain/vectorsearch/epsilla.rb +3 -3
- data/lib/langchain/vectorsearch/milvus.rb +2 -2
- data/lib/langchain/vectorsearch/pgvector.rb +2 -2
- data/lib/langchain/vectorsearch/pinecone.rb +2 -2
- data/lib/langchain/vectorsearch/qdrant.rb +2 -2
- data/lib/langchain/vectorsearch/weaviate.rb +4 -4
- data/lib/langchain/version.rb +1 -1
- metadata +13 -23
- data/lib/langchain/tool/base.rb +0 -107
- data/lib/langchain/tool/calculator/calculator.json +0 -19
- data/lib/langchain/tool/calculator/calculator.rb +0 -34
- data/lib/langchain/tool/database/database.json +0 -46
- data/lib/langchain/tool/file_system/file_system.json +0 -57
- data/lib/langchain/tool/file_system/file_system.rb +0 -32
- data/lib/langchain/tool/google_search/google_search.json +0 -19
- data/lib/langchain/tool/news_retriever/news_retriever.json +0 -122
- data/lib/langchain/tool/ruby_code_interpreter/ruby_code_interpreter.json +0 -19
- data/lib/langchain/tool/ruby_code_interpreter/ruby_code_interpreter.rb +0 -37
- data/lib/langchain/tool/tavily/tavily.json +0 -54
- data/lib/langchain/tool/vectorsearch/vectorsearch.json +0 -24
- data/lib/langchain/tool/vectorsearch/vectorsearch.rb +0 -36
- data/lib/langchain/tool/weather/weather.json +0 -19
- data/lib/langchain/tool/wikipedia/wikipedia.json +0 -19
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: dde504e05b1cbb32c857569bf71301537fed2deb468f1bdd69a7ef900a41c085
+  data.tar.gz: '08659cddd6f0bb285e167c7a35dbd2f83c2e9bb51a69206217ea91649e99839c'
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: ce4dd091498659a2d8dda4b54e9e9584dc19be5f390dc5f1d98efa054a264134dc3510f2f83c65bdf23edfbd7344587b91113e69c2ea1fea2cdc157317735799
+  data.tar.gz: a6df110aa7d96c87402164f67aadab0a97e2a62b68b7466cf630fe79dd0611a1740ae11163361eef9c98fc816f7ba12d7bfc0aa2225759cc8191f59fead8fcbd
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,13 @@
 ## [Unreleased]
 
+## [0.15.0] - 2024-08-14
+- Fix Langchain::Assistant when llm is Anthropic
+- Fix GoogleGemini#chat method
+- Langchain::LLM::Weaviate initializer does not require api_key anymore
+- [BREAKING] Langchain::LLM::OpenAI#chat() uses `gpt-4o-mini` by default instead of `gpt-3.5-turbo` previously.
+- [BREAKING] Assistant works with a number of open-source models via Ollama.
+- [BREAKING] Introduce new `Langchain::ToolDefinition` module to define tools. This replaces the previous reliance on subclassing from `Langchain::Tool::Base`.
+
 ## [0.14.0] - 2024-07-12
 - Removed TokenLength validators
 - Assistant works with a Mistral LLM now
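The `Langchain::ToolDefinition` change is the headline here: a tool is now any plain class that extends the module and declares its functions with `define_function`, rather than a `Langchain::Tool::Base` subclass. A minimal sketch of a custom tool, modeled on the Calculator tool added later in this diff (the class and function names are illustrative, not part of the gem):

require "langchain"

class WordCounter
  extend Langchain::ToolDefinition

  # Declares a `count` function; this schema is what gets serialized
  # and sent to the LLM as the tool/function definition.
  define_function :count, description: "Counts the words in a piece of text" do
    property :text, type: "string", description: "Text to count words in", required: true
  end

  # The LLM invokes this as "word_counter__count"
  def count(text:)
    text.split.size.to_s
  end
end

assistant = Langchain::Assistant.new(
  llm: Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"]),
  tools: [WordCounter.new]
)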
data/lib/langchain/assistants/assistant.rb
CHANGED
@@ -19,14 +19,6 @@ module Langchain
     attr_reader :total_prompt_tokens, :total_completion_tokens, :total_tokens
     attr_accessor :tools
 
-    SUPPORTED_LLMS = [
-      Langchain::LLM::Anthropic,
-      Langchain::LLM::GoogleGemini,
-      Langchain::LLM::GoogleVertexAI,
-      Langchain::LLM::Ollama,
-      Langchain::LLM::OpenAI
-    ]
-
     # Create a new assistant
     #
     # @param llm [Langchain::LLM::Base] LLM instance that the assistant will use
@@ -39,15 +31,12 @@ module Langchain
       tools: [],
       instructions: nil
     )
-      unless SUPPORTED_LLMS.include?(llm.class)
-        raise ArgumentError, "
-      end
-      if llm.is_a?(Langchain::LLM::Ollama)
-        raise ArgumentError, "Currently only `mistral:7b-instruct-v0.3-fp16` model is supported for Ollama LLM" unless llm.defaults[:completion_model_name] == "mistral:7b-instruct-v0.3-fp16"
+      unless tools.is_a?(Array) && tools.all? { |tool| tool.class.singleton_class.included_modules.include?(Langchain::ToolDefinition) }
+        raise ArgumentError, "Tools must be an array of objects extending Langchain::ToolDefinition"
       end
-      raise ArgumentError, "Tools must be an array of Langchain::Tool::Base instance(s)" unless tools.is_a?(Array) && tools.all? { |tool| tool.is_a?(Langchain::Tool::Base) }
 
       @llm = llm
+      @llm_adapter = LLM::Adapter.build(llm)
       @thread = thread || Langchain::Thread.new
       @tools = tools
       @instructions = instructions
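The new validation checks the tool's singleton class because `extend` inserts a module into the singleton class's ancestry, not into the instances'. A quick plain-Ruby illustration of why that predicate works (names are illustrative):

module Marker; end

class Widget
  extend Marker
end

Widget.singleton_class.included_modules.include?(Marker) # => true
Widget.new.is_a?(Marker)                                 # => false, so an is_a? check would not work here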
@@ -214,14 +203,7 @@ module Langchain
     def handle_user_or_tool_message
       response = chat_with_llm
 
-
-      content = if llm.is_a?(Langchain::LLM::Ollama)
-        response.completion
-      else
-        response.chat_completion
-      end
-
-      add_message(role: response.role, content: content, tool_calls: response.tool_calls)
+      add_message(role: response.role, content: response.chat_completion, tool_calls: response.tool_calls)
       record_used_tokens(response.prompt_tokens, response.completion_tokens, response.total_tokens)
 
       set_state_for(response: response)
@@ -247,7 +229,7 @@ module Langchain
       run_tools(thread.messages.last.tool_calls)
       :in_progress
     rescue => e
-      Langchain.logger.error("Error running tools: #{e.message}")
+      Langchain.logger.error("Error running tools: #{e.message}; #{e.backtrace.join('\n')}")
       :failed
     end
@@ -268,17 +250,7 @@ module Langchain
     end
 
     def initialize_instructions
-      if llm.is_a?(Langchain::LLM::Ollama)
-        content = String.new # rubocop: disable Performance/UnfreezeString
-        if tools.any?
-          content << %([AVAILABLE_TOOLS] #{tools.map(&:to_openai_tools).flatten}[/AVAILABLE_TOOLS])
-        end
-        if instructions
-          content << "[INST] #{instructions}[/INST]"
-        end
-
-        add_message(role: "system", content: content)
-      elsif llm.is_a?(Langchain::LLM::OpenAI)
+      if llm.is_a?(Langchain::LLM::OpenAI)
         add_message(role: "system", content: instructions) if instructions
       end
     end
@@ -289,36 +261,12 @@ module Langchain
     def chat_with_llm
       Langchain.logger.info("Sending a call to #{llm.class}", for: self.class)
 
-      params = {}
-
-      if llm.is_a?(Langchain::LLM::OpenAI)
-        if tools.any?
-          params[:tools] = tools.map(&:to_openai_tools).flatten
-          params[:tool_choice] = "auto"
-        end
-      elsif llm.is_a?(Langchain::LLM::Anthropic)
-        if tools.any?
-          params[:tools] = tools.map(&:to_anthropic_tools).flatten
-          params[:tool_choice] = {type: "auto"}
-        end
-        params[:system] = instructions if instructions
-      elsif [Langchain::LLM::GoogleGemini, Langchain::LLM::GoogleVertexAI].include?(llm.class)
-        if tools.any?
-          params[:tools] = tools.map(&:to_google_gemini_tools).flatten
-          params[:system] = instructions if instructions
-          params[:tool_choice] = "auto"
-        end
-      end
-      # TODO: Not sure that tool_choice should always be "auto"; Maybe we can let the user toggle it.
-
-      if llm.is_a?(Langchain::LLM::Ollama)
-        params[:raw] = true
-        params[:prompt] = thread.prompt_of_concatenated_messages
-        llm.complete(**params)
-      else
-        params[:messages] = thread.array_of_message_hashes
-        llm.chat(**params)
-      end
+      params = @llm_adapter.build_chat_params(
+        tools: @tools,
+        instructions: @instructions,
+        messages: thread.array_of_message_hashes
+      )
+      @llm.chat(**params)
     end
 
     # Run the tools automatically
@@ -327,18 +275,10 @@ module Langchain
     def run_tools(tool_calls)
      # Iterate over each function invocation and submit tool output
      tool_calls.each do |tool_call|
-        tool_call_id, tool_name, method_name, tool_arguments = if llm.is_a?(Langchain::LLM::Ollama)
-          extract_ollama_tool_call(tool_call: tool_call)
-        elsif llm.is_a?(Langchain::LLM::OpenAI)
-          extract_openai_tool_call(tool_call: tool_call)
-        elsif [Langchain::LLM::GoogleGemini, Langchain::LLM::GoogleVertexAI].include?(llm.class)
-          extract_google_gemini_tool_call(tool_call: tool_call)
-        elsif llm.is_a?(Langchain::LLM::Anthropic)
-          extract_anthropic_tool_call(tool_call: tool_call)
-        end
+        tool_call_id, tool_name, method_name, tool_arguments = @llm_adapter.extract_tool_call_args(tool_call: tool_call)
 
        tool_instance = tools.find do |t|
-          t.name == tool_name
+          t.class.tool_name == tool_name
        end or raise ArgumentError, "Tool not found in assistant.tools"
 
        output = tool_instance.send(method_name, **tool_arguments)
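Dispatch hinges on the `toolname__methodname` convention in the function name the LLM returns: the adapter splits it, and the assistant looks the tool up by its class-level `tool_name`. Roughly (values are illustrative):

tools = [Langchain::Tool::Calculator.new]

function_name = "calculator__execute"              # as emitted by the LLM
tool_name, method_name = function_name.split("__") # => ["calculator", "execute"]

tool = tools.find { |t| t.class.tool_name == tool_name }
tool.send(method_name, input: "2 + 2")             # => 4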
@@ -347,54 +287,6 @@ module Langchain
       end
     end
 
-    def extract_ollama_tool_call(tool_call:)
-      tool_name, method_name = tool_call.dig("name").split("__")
-      tool_arguments = tool_call.dig("arguments").transform_keys(&:to_sym)
-      [nil, tool_name, method_name, tool_arguments]
-    end
-
-    # Extract the tool call information from the OpenAI tool call hash
-    #
-    # @param tool_call [Hash] The tool call hash
-    # @return [Array] The tool call information
-    def extract_openai_tool_call(tool_call:)
-      tool_call_id = tool_call.dig("id")
-
-      function_name = tool_call.dig("function", "name")
-      tool_name, method_name = function_name.split("__")
-      tool_arguments = JSON.parse(tool_call.dig("function", "arguments"), symbolize_names: true)
-
-      [tool_call_id, tool_name, method_name, tool_arguments]
-    end
-
-    # Extract the tool call information from the Anthropic tool call hash
-    #
-    # @param tool_call [Hash] The tool call hash, format: {"type"=>"tool_use", "id"=>"toolu_01TjusbFApEbwKPRWTRwzadR", "name"=>"news_retriever__get_top_headlines", "input"=>{"country"=>"us", "page_size"=>10}}], "stop_reason"=>"tool_use"}
-    # @return [Array] The tool call information
-    def extract_anthropic_tool_call(tool_call:)
-      tool_call_id = tool_call.dig("id")
-
-      function_name = tool_call.dig("name")
-      tool_name, method_name = function_name.split("__")
-      tool_arguments = tool_call.dig("input").transform_keys(&:to_sym)
-
-      [tool_call_id, tool_name, method_name, tool_arguments]
-    end
-
-    # Extract the tool call information from the Google Gemini tool call hash
-    #
-    # @param tool_call [Hash] The tool call hash, format: {"functionCall"=>{"name"=>"weather__execute", "args"=>{"input"=>"NYC"}}}
-    # @return [Array] The tool call information
-    def extract_google_gemini_tool_call(tool_call:)
-      tool_call_id = tool_call.dig("functionCall", "name")
-
-      function_name = tool_call.dig("functionCall", "name")
-      tool_name, method_name = function_name.split("__")
-      tool_arguments = tool_call.dig("functionCall", "args").transform_keys(&:to_sym)
-
-      [tool_call_id, tool_name, method_name, tool_arguments]
-    end
-
     # Build a message
     #
     # @param role [String] The role of the message
@@ -403,15 +295,7 @@ module Langchain
     # @param tool_call_id [String] The ID of the tool call to include in the message
     # @return [Langchain::Message] The Message object
     def build_message(role:, content: nil, tool_calls: [], tool_call_id: nil)
-      if llm.is_a?(Langchain::LLM::Ollama)
-        Langchain::Messages::OllamaMessage.new(role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id)
-      elsif llm.is_a?(Langchain::LLM::OpenAI)
-        Langchain::Messages::OpenAIMessage.new(role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id)
-      elsif [Langchain::LLM::GoogleGemini, Langchain::LLM::GoogleVertexAI].include?(llm.class)
-        Langchain::Messages::GoogleGeminiMessage.new(role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id)
-      elsif llm.is_a?(Langchain::LLM::Anthropic)
-        Langchain::Messages::AnthropicMessage.new(role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id)
-      end
+      @llm_adapter.build_message(role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id)
     end
 
     # Increment the tokens count based on the last interaction with the LLM
@@ -427,5 +311,165 @@ module Langchain
     end
 
     # TODO: Fix the message truncation when context window is exceeded
+
+    module LLM
+      class Adapter
+        def self.build(llm)
+          case llm
+          when Langchain::LLM::Ollama
+            Adapters::Ollama.new
+          when Langchain::LLM::OpenAI
+            Adapters::OpenAI.new
+          when Langchain::LLM::GoogleGemini, Langchain::LLM::GoogleVertexAI
+            Adapters::GoogleGemini.new
+          when Langchain::LLM::Anthropic
+            Adapters::Anthropic.new
+          else
+            raise ArgumentError, "Unsupported LLM type: #{llm.class}"
+          end
+        end
+      end
+
+      module Adapters
+        class Base
+          def build_chat_params(tools:, instructions:, messages:)
+            raise NotImplementedError, "Subclasses must implement build_chat_params"
+          end
+
+          def extract_tool_call_args(tool_call:)
+            raise NotImplementedError, "Subclasses must implement extract_tool_call_args"
+          end
+
+          def build_message(role:, content: nil, tool_calls: [], tool_call_id: nil)
+            raise NotImplementedError, "Subclasses must implement build_message"
+          end
+        end
+
+        class Ollama < Base
+          def build_chat_params(tools:, instructions:, messages:)
+            params = {messages: messages}
+            if tools.any?
+              params[:tools] = tools.map { |tool| tool.class.function_schemas.to_openai_format }.flatten
+            end
+            params
+          end
+
+          def build_message(role:, content: nil, tool_calls: [], tool_call_id: nil)
+            Langchain::Messages::OllamaMessage.new(role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id)
+          end
+
+          # Extract the tool call information from the OpenAI tool call hash
+          #
+          # @param tool_call [Hash] The tool call hash
+          # @return [Array] The tool call information
+          def extract_tool_call_args(tool_call:)
+            tool_call_id = tool_call.dig("id")
+
+            function_name = tool_call.dig("function", "name")
+            tool_name, method_name = function_name.split("__")
+
+            tool_arguments = tool_call.dig("function", "arguments")
+            tool_arguments = if tool_arguments.is_a?(Hash)
+              Langchain::Utils::HashTransformer.symbolize_keys(tool_arguments)
+            else
+              JSON.parse(tool_arguments, symbolize_names: true)
+            end
+
+            [tool_call_id, tool_name, method_name, tool_arguments]
+          end
+        end
+
+        class OpenAI < Base
+          def build_chat_params(tools:, instructions:, messages:)
+            params = {messages: messages}
+            if tools.any?
+              params[:tools] = tools.map { |tool| tool.class.function_schemas.to_openai_format }.flatten
+              params[:tool_choice] = "auto"
+            end
+            params
+          end
+
+          def build_message(role:, content: nil, tool_calls: [], tool_call_id: nil)
+            Langchain::Messages::OpenAIMessage.new(role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id)
+          end
+
+          # Extract the tool call information from the OpenAI tool call hash
+          #
+          # @param tool_call [Hash] The tool call hash
+          # @return [Array] The tool call information
+          def extract_tool_call_args(tool_call:)
+            tool_call_id = tool_call.dig("id")
+
+            function_name = tool_call.dig("function", "name")
+            tool_name, method_name = function_name.split("__")
+
+            tool_arguments = tool_call.dig("function", "arguments")
+            tool_arguments = if tool_arguments.is_a?(Hash)
+              Langchain::Utils::HashTransformer.symbolize_keys(tool_arguments)
+            else
+              JSON.parse(tool_arguments, symbolize_names: true)
+            end
+
+            [tool_call_id, tool_name, method_name, tool_arguments]
+          end
+        end
+
+        class GoogleGemini < Base
+          def build_chat_params(tools:, instructions:, messages:)
+            params = {messages: messages}
+            if tools.any?
+              params[:tools] = tools.map { |tool| tool.class.function_schemas.to_google_gemini_format }.flatten
+              params[:system] = instructions if instructions
+              params[:tool_choice] = "auto"
+            end
+            params
+          end
+
+          def build_message(role:, content: nil, tool_calls: [], tool_call_id: nil)
+            Langchain::Messages::GoogleGeminiMessage.new(role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id)
+          end
+
+          # Extract the tool call information from the Google Gemini tool call hash
+          #
+          # @param tool_call [Hash] The tool call hash, format: {"functionCall"=>{"name"=>"weather__execute", "args"=>{"input"=>"NYC"}}}
+          # @return [Array] The tool call information
+          def extract_tool_call_args(tool_call:)
+            tool_call_id = tool_call.dig("functionCall", "name")
+            function_name = tool_call.dig("functionCall", "name")
+            tool_name, method_name = function_name.split("__")
+            tool_arguments = tool_call.dig("functionCall", "args").transform_keys(&:to_sym)
+            [tool_call_id, tool_name, method_name, tool_arguments]
+          end
+        end
+
+        class Anthropic < Base
+          def build_chat_params(tools:, instructions:, messages:)
+            params = {messages: messages}
+            if tools.any?
+              params[:tools] = tools.map { |tool| tool.class.function_schemas.to_anthropic_format }.flatten
+              params[:tool_choice] = {type: "auto"}
+            end
+            params[:system] = instructions if instructions
+            params
+          end
+
+          def build_message(role:, content: nil, tool_calls: [], tool_call_id: nil)
+            Langchain::Messages::AnthropicMessage.new(role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id)
+          end
+
+          # Extract the tool call information from the Anthropic tool call hash
+          #
+          # @param tool_call [Hash] The tool call hash, format: {"type"=>"tool_use", "id"=>"toolu_01TjusbFApEbwKPRWTRwzadR", "name"=>"news_retriever__get_top_headlines", "input"=>{"country"=>"us", "page_size"=>10}}], "stop_reason"=>"tool_use"}
+          # @return [Array] The tool call information
+          def extract_tool_call_args(tool_call:)
+            tool_call_id = tool_call.dig("id")
+            function_name = tool_call.dig("name")
+            tool_name, method_name = function_name.split("__")
+            tool_arguments = tool_call.dig("input").transform_keys(&:to_sym)
+            [tool_call_id, tool_name, method_name, tool_arguments]
+          end
+        end
+      end
+    end
   end
 end
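Taken together, the adapters move every provider-specific branch out of the assistant itself. A sketch of the flow, assuming the module is namespaced under `Langchain::Assistant` as defined above:

llm = Langchain::LLM::Anthropic.new(api_key: ENV["ANTHROPIC_API_KEY"])
adapter = Langchain::Assistant::LLM::Adapter.build(llm) # => an Adapters::Anthropic instance

params = adapter.build_chat_params(
  tools: [Langchain::Tool::Calculator.new],
  instructions: "You are a helpful assistant",
  messages: []
)
# => {messages: [], tools: [...], tool_choice: {type: "auto"}, system: "You are a helpful assistant"}

llm.chat(**params)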
data/lib/langchain/assistants/messages/ollama_message.rb
CHANGED
@@ -30,27 +30,15 @@ module Langchain
       @tool_call_id = tool_call_id
     end
 
-
-
-
-
-
-
-
-
-
-      "[INST] #{content}[/INST]"
-    end
-
-    def to_tool_message_string
-      "[TOOL_RESULTS] #{content}[/TOOL_RESULTS]"
-    end
-
-    def to_assistant_message_string
-      if tool_calls.any?
-        %("[TOOL_CALLS] #{tool_calls}")
-      else
-        content
+    # Convert the message to an OpenAI API-compatible hash
+    #
+    # @return [Hash] The message as an OpenAI API-compatible hash
+    def to_hash
+      {}.tap do |h|
+        h[:role] = role
+        h[:content] = content if content # Content is nil for tool calls
+        h[:tool_calls] = tool_calls if tool_calls.any?
+        h[:tool_call_id] = tool_call_id if tool_call_id
       end
     end
 
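The Mistral-specific `[INST]`/`[TOOL_RESULTS]` string builders are gone; `OllamaMessage` now serializes to the OpenAI-style hash the Ollama chat API accepts. For example (illustrative values):

msg = Langchain::Messages::OllamaMessage.new(role: "user", content: "What is 2 + 2?")
msg.to_hash
# => {role: "user", content: "What is 2 + 2?"}

tool_msg = Langchain::Messages::OllamaMessage.new(role: "tool", content: "4", tool_call_id: "123")
tool_msg.to_hash
# => {role: "tool", content: "4", tool_call_id: "123"}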
data/lib/langchain/contextual_logger.rb
CHANGED
@@ -35,8 +35,8 @@ module Langchain
       @logger.respond_to?(method, include_private)
     end
 
-    def method_missing(method, *args, **kwargs, &)
-      return @logger.send(method, *args, **kwargs, &) unless @levels.include?(method)
+    def method_missing(method, *args, **kwargs, &block)
+      return @logger.send(method, *args, **kwargs, &block) unless @levels.include?(method)
 
       for_class = kwargs.delete(:for)
       for_class_name = for_class&.name
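This swaps Ruby 3.1+'s anonymous block forwarding for a named block parameter, which also runs on Ruby 3.0. The difference in isolation (standalone sketch, not gem code):

require "logger"

# Anonymous block forwarding -- parses only on Ruby >= 3.1
def forward_anon(logger, method, *args, **kwargs, &)
  logger.send(method, *args, **kwargs, &)
end

# Named block -- equivalent behavior, also valid on Ruby 3.0
def forward_named(logger, method, *args, **kwargs, &block)
  logger.send(method, *args, **kwargs, &block)
end

forward_named(Logger.new($stdout), :info) { "lazily built message" }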
data/lib/langchain/llm/google_gemini.rb
CHANGED
@@ -62,7 +62,7 @@ module Langchain::LLM
 
     request = Net::HTTP::Post.new(uri)
     request.content_type = "application/json"
-    request.body =
+    request.body = parameters.to_json
 
     response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: uri.scheme == "https") do |http|
       http.request(request)
data/lib/langchain/llm/ollama.rb
CHANGED
@@ -1,7 +1,5 @@
 # frozen_string_literal: true
 
-require "active_support/core_ext/hash"
-
 module Langchain::LLM
   # Interface to Ollama API.
   # Available models: https://ollama.ai/library
@@ -15,9 +13,9 @@ module Langchain::LLM
 
   DEFAULTS = {
     temperature: 0.0,
-    completion_model_name: "llama3",
-    embeddings_model_name: "llama3",
-    chat_completion_model_name: "llama3"
+    completion_model_name: "llama3.1",
+    embeddings_model_name: "llama3.1",
+    chat_completion_model_name: "llama3.1"
   }.freeze
 
   EMBEDDING_SIZES = {
@@ -25,20 +23,24 @@ module Langchain::LLM
     "dolphin-mixtral": 4_096,
     llama2: 4_096,
     llama3: 4_096,
+    "llama3.1": 4_096,
     llava: 4_096,
     mistral: 4_096,
     "mistral-openorca": 4_096,
-    mixtral: 4_096
+    mixtral: 4_096,
+    tinydolphin: 2_048
   }.freeze
 
   # Initialize the Ollama client
   # @param url [String] The URL of the Ollama instance
+  # @param api_key [String] The API key to use. This is optional and used when you expose Ollama API using Open WebUI
   # @param default_options [Hash] The default options to use
   #
-  def initialize(url: "http://localhost:11434", default_options: {})
+  def initialize(url: "http://localhost:11434", api_key: nil, default_options: {})
    depends_on "faraday"
    @url = url
-    @defaults = DEFAULTS.deep_merge(default_options)
+    @api_key = api_key
+    @defaults = DEFAULTS.merge(default_options)
    chat_parameters.update(
      model: {default: @defaults[:chat_completion_model_name]},
      temperature: {default: @defaults[:temperature]},
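The optional `api_key` exists for setups that expose the Ollama API behind Open WebUI, which authenticates with a bearer token (see the `auth_headers` change below). A sketch with placeholder values:

llm = Langchain::LLM::Ollama.new(
  url: "https://open-webui.example.com/ollama",  # hypothetical proxy URL
  api_key: ENV["OPEN_WEBUI_API_KEY"],            # sent as "Authorization: Bearer <key>"
  default_options: {chat_completion_model_name: "llama3.1"}
)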
@@ -113,7 +115,7 @@ module Langchain::LLM
       system: system,
       template: template,
       context: context,
-      stream:
+      stream: block_given?, # rubocop:disable Performance/BlockGivenWithExplicitBlock
       raw: raw
     }.compact
 
@@ -173,7 +175,7 @@ module Langchain::LLM
   # content: the content of the message
   # images (optional): a list of images to include in the message (for multimodal models such as llava)
   def chat(messages:, model: nil, **params, &block)
-    parameters = chat_parameters.to_params(params.merge(messages:, model:, stream:
+    parameters = chat_parameters.to_params(params.merge(messages:, model:, stream: block_given?)) # rubocop:disable Performance/BlockGivenWithExplicitBlock
     responses_stream = []
 
     client.post("api/chat", parameters) do |req|
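Streaming is now driven purely by whether the caller passes a block. A usage sketch:

# Without a block: stream is false and a single OllamaResponse is returned
response = llm.chat(messages: [{role: "user", content: "Hi"}])

# With a block: stream is enabled and each parsed chunk hash is yielded as it arrives
llm.chat(messages: [{role: "user", content: "Hi"}]) do |chunk|
  print chunk.dig("message", "content")
end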
@@ -264,13 +266,20 @@ module Langchain::LLM
   private
 
   def client
-    @client ||= Faraday.new(url: url) do |conn|
+    @client ||= Faraday.new(url: url, headers: auth_headers) do |conn|
       conn.request :json
       conn.response :json
       conn.response :raise_error
+      conn.response :logger, nil, {headers: true, bodies: true, errors: true}
     end
   end
 
+  def auth_headers
+    return unless @api_key
+
+    {"Authorization" => "Bearer #{@api_key}"}
+  end
+
   def json_responses_chunk_handler(&block)
     proc do |chunk, _size|
       chunk.split("\n").each do |chunk_line|
@@ -288,13 +297,10 @@ module Langchain::LLM
     OllamaResponse.new(final_response, model: parameters[:model])
   end
 
+  # BUG: If streamed, this method does not currently return the tool_calls response.
   def generate_final_chat_completion_response(responses_stream, parameters)
-    final_response = responses_stream.last.merge(
-      "message" => {
-        "role" => "assistant",
-        "content" => responses_stream.map { |resp| resp.dig("message", "content") }.join
-      }
-    )
+    final_response = responses_stream.last
+    final_response["message"]["content"] = responses_stream.map { |resp| resp.dig("message", "content") }.join
 
     OllamaResponse.new(final_response, model: parameters[:model])
   end
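The rewritten merge mutates the last streamed chunk in place, concatenating every chunk's message content. Traced with two illustrative chunks:

responses_stream = [
  {"message" => {"role" => "assistant", "content" => "Hel"}},
  {"message" => {"role" => "assistant", "content" => "lo"}, "done" => true}
]

final_response = responses_stream.last
final_response["message"]["content"] =
  responses_stream.map { |resp| resp.dig("message", "content") }.join
final_response.dig("message", "content") # => "Hello"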
data/lib/langchain/llm/response/ollama_response.rb
CHANGED
@@ -48,21 +48,7 @@ module Langchain::LLM
     end
 
     def tool_calls
-      if chat_completion && (parsed_tool_calls = JSON.parse(chat_completion))
-        [parsed_tool_calls]
-      elsif completion&.include?("[TOOL_CALLS]") && (
-        parsed_tool_calls = JSON.parse(
-          completion
-            # Slice out the serialize JSON
-            .slice(/\{.*\}/)
-            # Replace hash rocket with colon
-            .gsub("=>", ":")
-        )
-      )
-        [parsed_tool_calls]
-      else
-        []
-      end
+      Array(raw_response.dig("message", "tool_calls"))
     end
 
     private
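With raw-mode prompting gone, tool calls are read straight from the structured API response instead of being parsed out of `[TOOL_CALLS]` text markers. An illustrative response shape:

raw_response = {
  "message" => {
    "role" => "assistant",
    "tool_calls" => [
      {"function" => {"name" => "calculator__execute", "arguments" => {"input" => "2+2"}}}
    ]
  }
}

Array(raw_response.dig("message", "tool_calls"))
# => [{"function"=>{"name"=>"calculator__execute", "arguments"=>{"input"=>"2+2"}}}]
# Array(nil) => [], so responses without tool calls yield an empty list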
data/lib/langchain/tool/calculator.rb
ADDED
@@ -0,0 +1,38 @@
+# frozen_string_literal: true
+
+module Langchain::Tool
+  #
+  # A calculator tool that falls back to the Google calculator widget
+  #
+  # Gem requirements:
+  #   gem "eqn", "~> 1.6.5"
+  #   gem "google_search_results", "~> 2.0.0"
+  #
+  # Usage:
+  #   calculator = Langchain::Tool::Calculator.new
+  #
+  class Calculator
+    extend Langchain::ToolDefinition
+    include Langchain::DependencyHelper
+
+    define_function :execute, description: "Evaluates a pure math expression or if equation contains non-math characters (e.g.: \"12F in Celsius\") then it uses the google search calculator to evaluate the expression" do
+      property :input, type: "string", description: "Math expression", required: true
+    end
+
+    def initialize
+      depends_on "eqn"
+    end
+
+    # Evaluates a pure math expression or if equation contains non-math characters (e.g.: "12F in Celsius") then it uses the google search calculator to evaluate the expression
+    #
+    # @param input [String] math expression
+    # @return [String] Answer
+    def execute(input:)
+      Langchain.logger.info("Executing \"#{input}\"", for: self.class)
+
+      Eqn::Calculator.calc(input)
+    rescue Eqn::ParseError, Eqn::NoVariableValueError
+      "\"#{input}\" is an invalid mathematical expression"
+    end
+  end
+end
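End to end, the rewritten Calculator plugs into the assistant like any other `ToolDefinition` tool; the LLM sees a `calculator__execute` function. A sketch (method names per the assistant API at this version; treat as illustrative):

assistant = Langchain::Assistant.new(
  llm: Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"]),
  tools: [Langchain::Tool::Calculator.new]
)

assistant.add_message_and_run(content: "What is 2 ** 10?", auto_tool_execution: true)
assistant.thread.messages.last.content # => answer computed via the eqn gem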