langchainrb 0.17.1 → 0.19.0

Files changed (44)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +30 -0
  3. data/README.md +7 -2
  4. data/lib/langchain/{assistants → assistant}/llm/adapter.rb +1 -1
  5. data/lib/langchain/assistant/llm/adapters/anthropic.rb +105 -0
  6. data/lib/langchain/assistant/llm/adapters/base.rb +63 -0
  7. data/lib/langchain/{assistants → assistant}/llm/adapters/google_gemini.rb +43 -3
  8. data/lib/langchain/{assistants → assistant}/llm/adapters/mistral_ai.rb +39 -2
  9. data/lib/langchain/assistant/llm/adapters/ollama.rb +94 -0
  10. data/lib/langchain/{assistants → assistant}/llm/adapters/openai.rb +38 -2
  11. data/lib/langchain/assistant/messages/anthropic_message.rb +83 -0
  12. data/lib/langchain/assistant/messages/base.rb +56 -0
  13. data/lib/langchain/assistant/messages/google_gemini_message.rb +92 -0
  14. data/lib/langchain/assistant/messages/mistral_ai_message.rb +143 -0
  15. data/lib/langchain/assistant/messages/ollama_message.rb +76 -0
  16. data/lib/langchain/assistant/messages/openai_message.rb +105 -0
  17. data/lib/langchain/{assistants/assistant.rb → assistant.rb} +26 -48
  18. data/lib/langchain/llm/ai21.rb +1 -1
  19. data/lib/langchain/llm/anthropic.rb +64 -9
  20. data/lib/langchain/llm/aws_bedrock.rb +12 -13
  21. data/lib/langchain/llm/azure.rb +2 -2
  22. data/lib/langchain/llm/base.rb +1 -1
  23. data/lib/langchain/llm/cohere.rb +8 -8
  24. data/lib/langchain/llm/google_gemini.rb +5 -6
  25. data/lib/langchain/llm/google_vertex_ai.rb +6 -5
  26. data/lib/langchain/llm/hugging_face.rb +5 -5
  27. data/lib/langchain/llm/mistral_ai.rb +4 -4
  28. data/lib/langchain/llm/ollama.rb +7 -8
  29. data/lib/langchain/llm/openai.rb +8 -7
  30. data/lib/langchain/llm/parameters/chat.rb +1 -0
  31. data/lib/langchain/llm/replicate.rb +8 -16
  32. data/lib/langchain/tool_definition.rb +7 -0
  33. data/lib/langchain/version.rb +1 -1
  34. data/lib/langchain.rb +1 -14
  35. metadata +16 -16
  36. data/lib/langchain/assistants/llm/adapters/_base.rb +0 -21
  37. data/lib/langchain/assistants/llm/adapters/anthropic.rb +0 -62
  38. data/lib/langchain/assistants/llm/adapters/ollama.rb +0 -57
  39. data/lib/langchain/assistants/messages/anthropic_message.rb +0 -75
  40. data/lib/langchain/assistants/messages/base.rb +0 -54
  41. data/lib/langchain/assistants/messages/google_gemini_message.rb +0 -90
  42. data/lib/langchain/assistants/messages/mistral_ai_message.rb +0 -96
  43. data/lib/langchain/assistants/messages/ollama_message.rb +0 -74
  44. data/lib/langchain/assistants/messages/openai_message.rb +0 -103
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: f7061ef2090d35626239ca575b60edb291dbbadab7de85a5a2796792e1691437
-  data.tar.gz: 30cb1f14b602a22e7df8f2dba42660383d44482cbe83fb35dc9539afa836739c
+  metadata.gz: 89fdd6c82d689f6e7057133eab08ef8367e72e211d3a59df423ce26cd3b2c04d
+  data.tar.gz: e77493aec62198a014b0296490779b18016407c1808de45020e25b63a8d42d17
 SHA512:
-  metadata.gz: dd08fb29bd0ff9237cc27980c3bac607baeb9d54a93f297b1e81fb863b7cbb9720db4adacb3dae92bcbe71d2eb59b38d4de0ee321face467a0b82bde627d2929
-  data.tar.gz: 4a3661afd2d9d75a64e02f6f173cd0bf0e016207c444ca4506bab907f00dc906f5bbf82c96aad9521280265f214b0f3e82dd5e4ee54dc40f3afb415a6f50b365
+  metadata.gz: 9040755869f6cbf666f8ec1225b976d095a809915c533e8102b281bc8c66d4751e93abbef19a4c27d806e5abd9555690a4d89401e7d06c29d8e4e9b6252bff4f
+  data.tar.gz: 23287eb713d76ed824e13e4b2e07e66f342a08177b39b7fa2d6fb85af25f3f0ed2bc898d6b1316c2859963526f7cf1e635c413a8786ae671399ff50b87697f82
data/CHANGELOG.md CHANGED
@@ -1,5 +1,35 @@
+# CHANGELOG
+
+## Key
+- [BREAKING]: A breaking change. After an upgrade, your app may need modifications to keep working correctly.
+- [FEATURE]: A non-breaking improvement to the app. Either introduces new functionality, or improves on an existing feature.
+- [BUGFIX]: Fixes a bug with a non-breaking change.
+- [COMPAT]: Compatibility improvements - changes to make langchainrb more compatible with different dependency versions.
+- [OPTIM]: Optimization or performance increase.
+- [DOCS]: Documentation changes. No changes to the library's behavior.
+- [SECURITY]: A change which fixes a security vulnerability.
+
 ## [Unreleased]
 
+## [0.19.0] - 2024-10-23
+- [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/840] Rename `chat_completion_model_name` parameter to `chat_model` in Langchain::LLM parameters.
+- [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/840] Rename `completion_model_name` parameter to `completion_model` in Langchain::LLM parameters.
+- [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/840] Rename `embeddings_model_name` parameter to `embedding_model` in Langchain::LLM parameters.
+- [BUGFIX] [https://github.com/patterns-ai-core/langchainrb/pull/850/] Fix MistralAIMessage to handle "Tool" output
+- [BUGFIX] [https://github.com/patterns-ai-core/langchainrb/pull/837] Fix bug when tool functions with no input variables are used with Langchain::LLM::Anthropic
+- [BUGFIX] [https://github.com/patterns-ai-core/langchainrb/pull/836] Fix bug where `assistant.instructions = nil` did not remove the system message
+- [FEATURE] [https://github.com/patterns-ai-core/langchainrb/pull/838] Allow setting `safety_settings: []` in default_options for Langchain::LLM::GoogleGemini and Langchain::LLM::GoogleVertexAI constructors
+
+## [0.18.0] - 2024-10-12
+- [BREAKING] Remove `Langchain::Assistant#clear_thread!` method
+- [BREAKING] The `Langchain::Messages::*` namespace has migrated to `Langchain::Assistant::Messages::*`
+- [BREAKING] Modify `Langchain::LLM::AwsBedrock` constructor to pass model options via `default_options: {...}`
+- Introduce the `Langchain::Assistant#parallel_tool_calls` option to control whether the LLM may make multiple parallel tool calls. Default: true
+- Minor improvements to the Langchain::Assistant class
+- Added support for streaming with Anthropic
+- Bump anthropic gem
+- The default Langchain::LLM::Anthropic chat model is now "claude-3-5-sonnet-20240620"
+
 ## [0.17.1] - 2024-10-07
 - Move Langchain::Assistant::LLM::Adapter-related classes to separate files
 - Fix Langchain::Tool::Database#describe_table method
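The 0.19.0 renames are mechanical, but apps still passing the old keys will break on upgrade. A minimal before/after sketch, assuming an OpenAI-backed setup (the same rename applies to `completion_model` and `embedding_model`):

```ruby
require "langchain"

# Before (<= 0.17.x) -- old keyword, removed in 0.19.0:
# llm = Langchain::LLM::OpenAI.new(
#   api_key: ENV["OPENAI_API_KEY"],
#   default_options: {chat_completion_model_name: "gpt-4o"}
# )

# After (>= 0.19.0):
llm = Langchain::LLM::OpenAI.new(
  api_key: ENV["OPENAI_API_KEY"],
  default_options: {chat_model: "gpt-4o"}
)
```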
data/README.md CHANGED
@@ -86,7 +86,7 @@ Most LLM classes can be initialized with an API key and optional default options
 ```ruby
 llm = Langchain::LLM::OpenAI.new(
   api_key: ENV["OPENAI_API_KEY"],
-  default_options: { temperature: 0.7, chat_completion_model_name: "gpt-4o" }
+  default_options: { temperature: 0.7, chat_model: "gpt-4o" }
 )
 ```
 
@@ -132,6 +132,8 @@ Use the `chat` method to generate chat completions:
 messages = [
   { role: "system", content: "You are a helpful assistant." },
   { role: "user", content: "What's the weather like today?" }
+  # Google Gemini and Google VertexAI expect messages in a different format:
+  # { role: "user", parts: [{ text: "why is the sky blue?" }] }
 ]
 response = llm.chat(messages: messages)
 chat_completion = response.chat_completion
@@ -503,7 +505,7 @@ assistant.add_message_and_run!(content: "What's the latest news about AI?")
 # Supply an image to the assistant
 assistant.add_message_and_run!(
   content: "Show me a picture of a cat",
-  image: "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
+  image_url: "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
 )
 
 # Access the conversation thread
@@ -525,11 +527,14 @@ assistant.add_message(content: "Hello")
 assistant.run(auto_tool_execution: true)
 ```
 
+Note that streaming is not currently supported for all LLMs.
+
 ### Configuration
 * `llm`: The LLM instance to use (required)
 * `tools`: An array of tool instances (optional)
 * `instructions`: System instructions for the assistant (optional)
 * `tool_choice`: Specifies how tools should be selected. Default: "auto". A specific tool function name can be passed. This will force the Assistant to **always** use this function.
+* `parallel_tool_calls`: Whether to make multiple parallel tool calls. Default: true
 * `add_message_callback`: A callback function (proc, lambda) that is called when any message is added to the conversation (optional)
 
 ### Key Methods
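Taken together, the configuration options above might be wired up like the following sketch (the tool choice and the callback body are illustrative assumptions, not prescribed usage):

```ruby
require "langchain"

llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])

assistant = Langchain::Assistant.new(
  llm: llm,                                            # required
  instructions: "You are a helpful news bot",          # optional system instructions
  tools: [Langchain::Tool::NewsRetriever.new(api_key: ENV["NEWS_API_KEY"])],
  tool_choice: "auto",                                 # or a specific function name to force it
  parallel_tool_calls: false,                          # new in 0.18.0; defaults to true
  add_message_callback: ->(message) { puts message }   # fires for every added message
)
```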
data/lib/langchain/{assistants → assistant}/llm/adapter.rb CHANGED
@@ -1,4 +1,4 @@
-Dir[Pathname.new(__FILE__).dirname.join("adapters", "*.rb")].sort.each { |file| require file }
+# frozen_string_literal: true
 
 module Langchain
   class Assistant
data/lib/langchain/assistant/llm/adapters/anthropic.rb ADDED
@@ -0,0 +1,105 @@
+# frozen_string_literal: true
+
+module Langchain
+  class Assistant
+    module LLM
+      module Adapters
+        class Anthropic < Base
+          # Build the chat parameters for the Anthropic API
+          #
+          # @param messages [Array<Hash>] The messages
+          # @param instructions [String] The system instructions
+          # @param tools [Array<Hash>] The tools to use
+          # @param tool_choice [String] The tool choice
+          # @param parallel_tool_calls [Boolean] Whether to make parallel tool calls
+          # @return [Hash] The chat parameters
+          def build_chat_params(
+            messages:,
+            instructions:,
+            tools:,
+            tool_choice:,
+            parallel_tool_calls:
+          )
+            params = {messages: messages}
+            if tools.any?
+              params[:tools] = build_tools(tools)
+              params[:tool_choice] = build_tool_choice(tool_choice, parallel_tool_calls)
+            end
+            params[:system] = instructions if instructions
+            params
+          end
+
+          # Build an Anthropic message
+          #
+          # @param role [String] The role of the message
+          # @param content [String] The content of the message
+          # @param image_url [String] The image URL
+          # @param tool_calls [Array<Hash>] The tool calls
+          # @param tool_call_id [String] The tool call ID
+          # @return [Messages::AnthropicMessage] The Anthropic message
+          def build_message(role:, content: nil, image_url: nil, tool_calls: [], tool_call_id: nil)
+            Langchain.logger.warn "WARNING: Image URL is not supported by Anthropic currently" if image_url
+
+            Messages::AnthropicMessage.new(role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id)
+          end
+
+          # Extract the tool call information from the Anthropic tool call hash
+          #
+          # @param tool_call [Hash] The tool call hash, format: {"type"=>"tool_use", "id"=>"toolu_01TjusbFApEbwKPRWTRwzadR", "name"=>"news_retriever__get_top_headlines", "input"=>{"country"=>"us", "page_size"=>10}}
+          # @return [Array] The tool call information: [tool_call_id, tool_name, method_name, tool_arguments]
+          def extract_tool_call_args(tool_call:)
+            tool_call_id = tool_call.dig("id")
+            function_name = tool_call.dig("name")
+            tool_name, method_name = function_name.split("__")
+            tool_arguments = tool_call.dig("input").transform_keys(&:to_sym)
+            [tool_call_id, tool_name, method_name, tool_arguments]
+          end
+
+          # Build the tools for the Anthropic API
+          def build_tools(tools)
+            tools.map { |tool| tool.class.function_schemas.to_anthropic_format }.flatten
+          end
+
+          # Get the allowed assistant.tool_choice values for Anthropic
+          def allowed_tool_choices
+            ["auto", "any"]
+          end
+
+          # Get the available tool function names for Anthropic
+          #
+          # @param tools [Array<Hash>] The tools
+          # @return [Array<String>] The tool function names
+          def available_tool_names(tools)
+            build_tools(tools).map { |tool| tool.dig(:name) }
+          end
+
+          def tool_role
+            Messages::AnthropicMessage::TOOL_ROLE
+          end
+
+          def support_system_message?
+            Messages::AnthropicMessage::ROLES.include?("system")
+          end
+
+          private
+
+          def build_tool_choice(choice, parallel_tool_calls)
+            tool_choice_object = {disable_parallel_tool_use: !parallel_tool_calls}
+
+            case choice
+            when "auto"
+              tool_choice_object[:type] = "auto"
+            when "any"
+              tool_choice_object[:type] = "any"
+            else
+              tool_choice_object[:type] = "tool"
+              tool_choice_object[:name] = choice
+            end
+
+            tool_choice_object
+          end
+        end
+      end
+    end
+  end
+end
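For readers skimming the diff, the private `build_tool_choice` above maps the assistant-level `tool_choice` onto Anthropic's hash format. A sketch of the three branches (calling the private method via `send` purely for illustration):

```ruby
adapter = Langchain::Assistant::LLM::Adapters::Anthropic.new

adapter.send(:build_tool_choice, "auto", true)
# => {disable_parallel_tool_use: false, type: "auto"}

adapter.send(:build_tool_choice, "any", false)
# => {disable_parallel_tool_use: true, type: "any"}

# Anything else is treated as a specific tool function name:
adapter.send(:build_tool_choice, "news_retriever__get_top_headlines", true)
# => {disable_parallel_tool_use: false, type: "tool", name: "news_retriever__get_top_headlines"}
```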
data/lib/langchain/assistant/llm/adapters/base.rb ADDED
@@ -0,0 +1,63 @@
+# frozen_string_literal: true
+
+module Langchain
+  class Assistant
+    module LLM
+      module Adapters
+        class Base
+          # Build the chat parameters for the LLM
+          #
+          # @param messages [Array] The messages
+          # @param instructions [String] The system instructions
+          # @param tools [Array] The tools to use
+          # @param tool_choice [String] The tool choice
+          # @param parallel_tool_calls [Boolean] Whether to make parallel tool calls
+          # @return [Hash] The chat parameters
+          def build_chat_params(
+            messages:,
+            instructions:,
+            tools:,
+            tool_choice:,
+            parallel_tool_calls:
+          )
+            raise NotImplementedError, "Subclasses must implement build_chat_params"
+          end
+
+          # Extract the tool call information from the tool call hash
+          #
+          # @param tool_call [Hash] The tool call hash
+          # @return [Array] The tool call information
+          def extract_tool_call_args(tool_call:)
+            raise NotImplementedError, "Subclasses must implement extract_tool_call_args"
+          end
+
+          # Build a message for the LLM
+          #
+          # @param role [String] The role of the message
+          # @param content [String] The content of the message
+          # @param image_url [String] The image URL
+          # @param tool_calls [Array] The tool calls
+          # @param tool_call_id [String] The tool call ID
+          # @return [Messages::Base] The message
+          def build_message(role:, content: nil, image_url: nil, tool_calls: [], tool_call_id: nil)
+            raise NotImplementedError, "Subclasses must implement build_message"
+          end
+
+          # Does this adapter accept messages with role="system"?
+          #
+          # @return [Boolean] Whether the adapter supports system messages
+          def support_system_message?
+            raise NotImplementedError, "Subclasses must implement support_system_message?"
+          end
+
+          # The role name used to return the tool output
+          #
+          # @return [String] The tool role
+          def tool_role
+            raise NotImplementedError, "Subclasses must implement tool_role"
+          end
+        end
+      end
+    end
+  end
+end
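The Base class is effectively an interface: each provider adapter overrides these hooks. A hypothetical custom adapter might look roughly like this (the provider behavior and the reuse of the OpenAI message class are assumptions for illustration only):

```ruby
class MyProviderAdapter < Langchain::Assistant::LLM::Adapters::Base
  def build_chat_params(messages:, instructions:, tools:, tool_choice:, parallel_tool_calls:)
    # Hypothetical provider with OpenAI-compatible params and no tool support
    {messages: messages}
  end

  def build_message(role:, content: nil, image_url: nil, tool_calls: [], tool_call_id: nil)
    Langchain::Assistant::Messages::OpenAIMessage.new(
      role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id
    )
  end

  def extract_tool_call_args(tool_call:)
    tool_name, method_name = tool_call.dig("function", "name").split("__")
    [tool_call["id"], tool_name, method_name, tool_call.dig("function", "arguments")]
  end

  def support_system_message?
    true
  end

  def tool_role
    "tool"
  end
end
```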
data/lib/langchain/{assistants → assistant}/llm/adapters/google_gemini.rb CHANGED
@@ -1,9 +1,27 @@
+# frozen_string_literal: true
+
 module Langchain
   class Assistant
     module LLM
       module Adapters
         class GoogleGemini < Base
-          def build_chat_params(tools:, instructions:, messages:, tool_choice:)
+          # Build the chat parameters for the Google Gemini LLM
+          #
+          # @param messages [Array] The messages
+          # @param instructions [String] The system instructions
+          # @param tools [Array] The tools to use
+          # @param tool_choice [String] The tool choice
+          # @param parallel_tool_calls [Boolean] Whether to make parallel tool calls
+          # @return [Hash] The chat parameters
+          def build_chat_params(
+            messages:,
+            instructions:,
+            tools:,
+            tool_choice:,
+            parallel_tool_calls:
+          )
+            Langchain.logger.warn "WARNING: `parallel_tool_calls:` is not supported by Google Gemini currently"
+
             params = {messages: messages}
             if tools.any?
               params[:tools] = build_tools(tools)
@@ -13,10 +31,18 @@ module Langchain
             params
           end
 
+          # Build a Google Gemini message
+          #
+          # @param role [String] The role of the message
+          # @param content [String] The content of the message
+          # @param image_url [String] The image URL
+          # @param tool_calls [Array] The tool calls
+          # @param tool_call_id [String] The tool call ID
+          # @return [Messages::GoogleGeminiMessage] The Google Gemini message
           def build_message(role:, content: nil, image_url: nil, tool_calls: [], tool_call_id: nil)
-            warn "Image URL is not supported by Google Gemini" if image_url
+            Langchain.logger.warn "Image URL is not supported by Google Gemini" if image_url
 
-            Langchain::Messages::GoogleGeminiMessage.new(role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id)
+            Messages::GoogleGeminiMessage.new(role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id)
           end
 
           # Extract the tool call information from the Google Gemini tool call hash
@@ -31,18 +57,32 @@ module Langchain
             [tool_call_id, tool_name, method_name, tool_arguments]
           end
 
+          # Build the tools for the Google Gemini LLM
+          #
+          # @param tools [Array<Langchain::Tool::Base>] The tools
+          # @return [Array] The tools in Google Gemini format
           def build_tools(tools)
             tools.map { |tool| tool.class.function_schemas.to_google_gemini_format }.flatten
           end
 
+          # Get the allowed assistant.tool_choice values for Google Gemini
           def allowed_tool_choices
             ["auto", "none"]
           end
 
+          # Get the available tool names for Google Gemini
           def available_tool_names(tools)
             build_tools(tools).map { |tool| tool.dig(:name) }
           end
 
+          def tool_role
+            Messages::GoogleGeminiMessage::TOOL_ROLE
+          end
+
+          def support_system_message?
+            Messages::GoogleGeminiMessage::ROLES.include?("system")
+          end
+
           private
 
           def build_tool_config(choice)
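One convention worth calling out: every adapter's `extract_tool_call_args` splits the function name on `"__"`, because tool functions are registered as `tool__method`. A tiny sketch of that split, using the example name from the Anthropic adapter's docs:

```ruby
function_name = "news_retriever__get_top_headlines"
tool_name, method_name = function_name.split("__")

tool_name    # => "news_retriever"
method_name  # => "get_top_headlines"
```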
data/lib/langchain/{assistants → assistant}/llm/adapters/mistral_ai.rb CHANGED
@@ -1,9 +1,27 @@
+# frozen_string_literal: true
+
 module Langchain
   class Assistant
     module LLM
       module Adapters
         class MistralAI < Base
-          def build_chat_params(tools:, instructions:, messages:, tool_choice:)
+          # Build the chat parameters for the Mistral AI LLM
+          #
+          # @param messages [Array] The messages
+          # @param instructions [String] The system instructions
+          # @param tools [Array] The tools to use
+          # @param tool_choice [String] The tool choice
+          # @param parallel_tool_calls [Boolean] Whether to make parallel tool calls
+          # @return [Hash] The chat parameters
+          def build_chat_params(
+            messages:,
+            instructions:,
+            tools:,
+            tool_choice:,
+            parallel_tool_calls:
+          )
+            Langchain.logger.warn "WARNING: `parallel_tool_calls:` is not supported by Mistral AI currently"
+
             params = {messages: messages}
             if tools.any?
               params[:tools] = build_tools(tools)
@@ -12,8 +30,16 @@ module Langchain
             params
           end
 
+          # Build a Mistral AI message
+          #
+          # @param role [String] The role of the message
+          # @param content [String] The content of the message
+          # @param image_url [String] The image URL
+          # @param tool_calls [Array] The tool calls
+          # @param tool_call_id [String] The tool call ID
+          # @return [Messages::MistralAIMessage] The Mistral AI message
           def build_message(role:, content: nil, image_url: nil, tool_calls: [], tool_call_id: nil)
-            Langchain::Messages::MistralAIMessage.new(role: role, content: content, image_url: image_url, tool_calls: tool_calls, tool_call_id: tool_call_id)
+            Messages::MistralAIMessage.new(role: role, content: content, image_url: image_url, tool_calls: tool_calls, tool_call_id: tool_call_id)
           end
 
           # Extract the tool call information from the OpenAI tool call hash
@@ -36,18 +62,29 @@ module Langchain
             [tool_call_id, tool_name, method_name, tool_arguments]
           end
 
+          # Build the tools for the Mistral AI LLM
           def build_tools(tools)
             tools.map { |tool| tool.class.function_schemas.to_openai_format }.flatten
           end
 
+          # Get the allowed assistant.tool_choice values for Mistral AI
           def allowed_tool_choices
             ["auto", "none"]
           end
 
+          # Get the available tool names for Mistral AI
           def available_tool_names(tools)
             build_tools(tools).map { |tool| tool.dig(:function, :name) }
           end
 
+          def tool_role
+            Messages::MistralAIMessage::TOOL_ROLE
+          end
+
+          def support_system_message?
+            Messages::MistralAIMessage::ROLES.include?("system")
+          end
+
           private
 
           def build_tool_choice(choice)
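Unlike the Anthropic and Ollama adapters, this one forwards `image_url` into the message rather than warning and dropping it. A comparative sketch (the URL is a placeholder):

```ruby
mistral = Langchain::Assistant::LLM::Adapters::MistralAI.new
mistral.build_message(role: "user", content: "Describe this image", image_url: "https://example.com/cat.jpg")
# => Messages::MistralAIMessage with the image_url preserved

anthropic = Langchain::Assistant::LLM::Adapters::Anthropic.new
anthropic.build_message(role: "user", content: "Describe this image", image_url: "https://example.com/cat.jpg")
# Logs "WARNING: Image URL is not supported by Anthropic currently" and drops the URL
```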
data/lib/langchain/assistant/llm/adapters/ollama.rb ADDED
@@ -0,0 +1,94 @@
+# frozen_string_literal: true
+
+module Langchain
+  class Assistant
+    module LLM
+      module Adapters
+        class Ollama < Base
+          # Build the chat parameters for the Ollama LLM
+          #
+          # @param messages [Array] The messages
+          # @param instructions [String] The system instructions
+          # @param tools [Array] The tools to use
+          # @param tool_choice [String] The tool choice
+          # @param parallel_tool_calls [Boolean] Whether to make parallel tool calls
+          # @return [Hash] The chat parameters
+          def build_chat_params(
+            messages:,
+            instructions:,
+            tools:,
+            tool_choice:,
+            parallel_tool_calls:
+          )
+            Langchain.logger.warn "WARNING: `parallel_tool_calls:` is not supported by Ollama currently"
+            Langchain.logger.warn "WARNING: `tool_choice:` is not supported by Ollama currently"
+
+            params = {messages: messages}
+            if tools.any?
+              params[:tools] = build_tools(tools)
+            end
+            params
+          end
+
+          # Build an Ollama message
+          #
+          # @param role [String] The role of the message
+          # @param content [String] The content of the message
+          # @param image_url [String] The image URL
+          # @param tool_calls [Array] The tool calls
+          # @param tool_call_id [String] The tool call ID
+          # @return [Messages::OllamaMessage] The Ollama message
+          def build_message(role:, content: nil, image_url: nil, tool_calls: [], tool_call_id: nil)
+            Langchain.logger.warn "WARNING: Image URL is not supported by Ollama currently" if image_url
+
+            Messages::OllamaMessage.new(role: role, content: content, tool_calls: tool_calls, tool_call_id: tool_call_id)
+          end
+
+          # Extract the tool call information from the OpenAI-format tool call hash
+          #
+          # @param tool_call [Hash] The tool call hash
+          # @return [Array] The tool call information
+          def extract_tool_call_args(tool_call:)
+            tool_call_id = tool_call.dig("id")
+
+            function_name = tool_call.dig("function", "name")
+            tool_name, method_name = function_name.split("__")
+
+            tool_arguments = tool_call.dig("function", "arguments")
+            tool_arguments = if tool_arguments.is_a?(Hash)
+              Langchain::Utils::HashTransformer.symbolize_keys(tool_arguments)
+            else
+              JSON.parse(tool_arguments, symbolize_names: true)
+            end
+
+            [tool_call_id, tool_name, method_name, tool_arguments]
+          end
+
+          # Get the available tool names for Ollama
+          def available_tool_names(tools)
+            build_tools(tools).map { |tool| tool.dig(:function, :name) }
+          end
+
+          # Get the allowed assistant.tool_choice values for Ollama
+          def allowed_tool_choices
+            ["auto", "none"]
+          end
+
+          def tool_role
+            Messages::OllamaMessage::TOOL_ROLE
+          end
+
+          def support_system_message?
+            Messages::OllamaMessage::ROLES.include?("system")
+          end
+
+          private
+
+          def build_tools(tools)
+            tools.map { |tool| tool.class.function_schemas.to_openai_format }.flatten
+          end
+        end
+      end
+    end
+  end
+end
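The branch in `extract_tool_call_args` exists because Ollama may return tool arguments either as an already-parsed Hash or as a JSON string; both shapes normalize to symbol keys. A sketch under that assumption (there is no `"id"` key here, so the first element comes back nil; the calculator tool name is illustrative):

```ruby
adapter = Langchain::Assistant::LLM::Adapters::Ollama.new

# Arguments already parsed into a Hash:
adapter.extract_tool_call_args(tool_call: {
  "function" => {"name" => "calculator__execute", "arguments" => {"input" => "2+2"}}
})
# => [nil, "calculator", "execute", {input: "2+2"}]

# Arguments delivered as a JSON string:
adapter.extract_tool_call_args(tool_call: {
  "function" => {"name" => "calculator__execute", "arguments" => "{\"input\":\"2+2\"}"}
})
# => [nil, "calculator", "execute", {input: "2+2"}]
```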
data/lib/langchain/{assistants → assistant}/llm/adapters/openai.rb CHANGED
@@ -1,19 +1,44 @@
+# frozen_string_literal: true
+
 module Langchain
   class Assistant
     module LLM
       module Adapters
         class OpenAI < Base
-          def build_chat_params(tools:, instructions:, messages:, tool_choice:)
+          # Build the chat parameters for the OpenAI LLM
+          #
+          # @param messages [Array] The messages
+          # @param instructions [String] The system instructions
+          # @param tools [Array] The tools to use
+          # @param tool_choice [String] The tool choice
+          # @param parallel_tool_calls [Boolean] Whether to make parallel tool calls
+          # @return [Hash] The chat parameters
+          def build_chat_params(
+            messages:,
+            instructions:,
+            tools:,
+            tool_choice:,
+            parallel_tool_calls:
+          )
             params = {messages: messages}
             if tools.any?
               params[:tools] = build_tools(tools)
               params[:tool_choice] = build_tool_choice(tool_choice)
+              params[:parallel_tool_calls] = parallel_tool_calls
             end
             params
           end
 
+          # Build an OpenAI message
+          #
+          # @param role [String] The role of the message
+          # @param content [String] The content of the message
+          # @param image_url [String] The image URL
+          # @param tool_calls [Array] The tool calls
+          # @param tool_call_id [String] The tool call ID
+          # @return [Messages::OpenAIMessage] The OpenAI message
           def build_message(role:, content: nil, image_url: nil, tool_calls: [], tool_call_id: nil)
-            Langchain::Messages::OpenAIMessage.new(role: role, content: content, image_url: image_url, tool_calls: tool_calls, tool_call_id: tool_call_id)
+            Messages::OpenAIMessage.new(role: role, content: content, image_url: image_url, tool_calls: tool_calls, tool_call_id: tool_call_id)
           end
 
           # Extract the tool call information from the OpenAI tool call hash
@@ -36,18 +61,29 @@ module Langchain
             [tool_call_id, tool_name, method_name, tool_arguments]
           end
 
+          # Build the tools for the OpenAI LLM
           def build_tools(tools)
             tools.map { |tool| tool.class.function_schemas.to_openai_format }.flatten
           end
 
+          # Get the allowed assistant.tool_choice values for OpenAI
           def allowed_tool_choices
             ["auto", "none"]
           end
 
+          # Get the available tool names for OpenAI
           def available_tool_names(tools)
             build_tools(tools).map { |tool| tool.dig(:function, :name) }
          end
 
+          def tool_role
+            Messages::OpenAIMessage::TOOL_ROLE
+          end
+
+          def support_system_message?
+            Messages::OpenAIMessage::ROLES.include?("system")
+          end
+
           private
 
           def build_tool_choice(choice)
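OpenAI is the only provider in this set whose API accepts `parallel_tool_calls` directly, so the adapter forwards it whenever tools are present. Roughly (the Calculator tool instance is assumed for illustration):

```ruby
adapter = Langchain::Assistant::LLM::Adapters::OpenAI.new

params = adapter.build_chat_params(
  messages: [{role: "user", content: "What is 2 + 2?"}],
  instructions: nil,
  tools: [Langchain::Tool::Calculator.new],
  tool_choice: "auto",
  parallel_tool_calls: false
)

params[:parallel_tool_calls]  # => false
params.keys                   # => [:messages, :tools, :tool_choice, :parallel_tool_calls]
```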
data/lib/langchain/assistant/messages/anthropic_message.rb ADDED
@@ -0,0 +1,83 @@
+# frozen_string_literal: true
+
+module Langchain
+  class Assistant
+    module Messages
+      class AnthropicMessage < Base
+        ROLES = [
+          "assistant",
+          "user",
+          "tool_result"
+        ].freeze
+
+        TOOL_ROLE = "tool_result"
+
+        # Initialize a new Anthropic message
+        #
+        # @param role [String] The role of the message
+        # @param content [String] The content of the message
+        # @param tool_calls [Array<Hash>] The tool calls made in the message
+        # @param tool_call_id [String] The ID of the tool call
+        def initialize(role:, content: nil, tool_calls: [], tool_call_id: nil)
+          raise ArgumentError, "Role must be one of #{ROLES.join(", ")}" unless ROLES.include?(role)
+          raise ArgumentError, "Tool calls must be an array of hashes" unless tool_calls.is_a?(Array) && tool_calls.all? { |tool_call| tool_call.is_a?(Hash) }
+
+          @role = role
+          # Some tools return content as JSON, hence `.to_s`
+          @content = content.to_s
+          @tool_calls = tool_calls
+          @tool_call_id = tool_call_id
+        end
+
+        # Convert the message to an Anthropic API-compatible hash
+        #
+        # @return [Hash] The message as an Anthropic API-compatible hash
+        def to_hash
+          {}.tap do |h|
+            h[:role] = tool? ? "user" : role
+
+            h[:content] = if tool?
+              [
+                {
+                  type: "tool_result",
+                  tool_use_id: tool_call_id,
+                  content: content
+                }
+              ]
+            elsif tool_calls.any?
+              tool_calls
+            else
+              content
+            end
+          end
+        end
+
+        # Check if the message is a tool call
+        #
+        # @return [Boolean] true/false whether this message is a tool call
+        def tool?
+          role == "tool_result"
+        end
+
+        # Anthropic does not implement system prompts
+        def system?
+          false
+        end
+
+        # Check if the message came from an LLM
+        #
+        # @return [Boolean] true/false whether this message was produced by an LLM
+        def assistant?
+          role == "assistant"
+        end
+
+        # Check if the message came from an LLM
+        #
+        # @return [Boolean] true/false whether this message was produced by an LLM
+        def llm?
+          assistant?
+        end
+      end
+    end
+  end
+end
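The subtle part of `to_hash` above is the role remapping: Anthropic has no dedicated tool role, so a `tool_result` message is serialized as a `user` message wrapping a `tool_result` content block. A sketch (the ID and tool output are placeholders):

```ruby
msg = Langchain::Assistant::Messages::AnthropicMessage.new(
  role: "tool_result",
  content: "72F and sunny",                      # tool output; coerced with .to_s on init
  tool_call_id: "toolu_01TjusbFApEbwKPRWTRwzadR"
)

msg.tool?    # => true
msg.to_hash
# => {
#      role: "user",
#      content: [{type: "tool_result", tool_use_id: "toolu_01TjusbFApEbwKPRWTRwzadR", content: "72F and sunny"}]
#    }
```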