langchainrb 0.17.1 → 0.18.0

Files changed (38)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +10 -0
  3. data/README.md +5 -0
  4. data/lib/langchain/{assistants → assistant}/llm/adapter.rb +1 -1
  5. data/lib/langchain/assistant/llm/adapters/anthropic.rb +105 -0
  6. data/lib/langchain/assistant/llm/adapters/base.rb +63 -0
  7. data/lib/langchain/{assistants → assistant}/llm/adapters/google_gemini.rb +43 -3
  8. data/lib/langchain/{assistants → assistant}/llm/adapters/mistral_ai.rb +39 -2
  9. data/lib/langchain/assistant/llm/adapters/ollama.rb +94 -0
  10. data/lib/langchain/{assistants → assistant}/llm/adapters/openai.rb +38 -2
  11. data/lib/langchain/assistant/messages/anthropic_message.rb +77 -0
  12. data/lib/langchain/assistant/messages/base.rb +56 -0
  13. data/lib/langchain/assistant/messages/google_gemini_message.rb +92 -0
  14. data/lib/langchain/assistant/messages/mistral_ai_message.rb +98 -0
  15. data/lib/langchain/assistant/messages/ollama_message.rb +76 -0
  16. data/lib/langchain/assistant/messages/openai_message.rb +105 -0
  17. data/lib/langchain/{assistants/assistant.rb → assistant.rb} +26 -49
  18. data/lib/langchain/llm/ai21.rb +1 -1
  19. data/lib/langchain/llm/anthropic.rb +59 -4
  20. data/lib/langchain/llm/aws_bedrock.rb +6 -7
  21. data/lib/langchain/llm/azure.rb +1 -1
  22. data/lib/langchain/llm/hugging_face.rb +1 -1
  23. data/lib/langchain/llm/ollama.rb +0 -1
  24. data/lib/langchain/llm/openai.rb +2 -2
  25. data/lib/langchain/llm/parameters/chat.rb +1 -0
  26. data/lib/langchain/llm/replicate.rb +2 -10
  27. data/lib/langchain/version.rb +1 -1
  28. data/lib/langchain.rb +1 -14
  29. metadata +16 -16
  30. data/lib/langchain/assistants/llm/adapters/_base.rb +0 -21
  31. data/lib/langchain/assistants/llm/adapters/anthropic.rb +0 -62
  32. data/lib/langchain/assistants/llm/adapters/ollama.rb +0 -57
  33. data/lib/langchain/assistants/messages/anthropic_message.rb +0 -75
  34. data/lib/langchain/assistants/messages/base.rb +0 -54
  35. data/lib/langchain/assistants/messages/google_gemini_message.rb +0 -90
  36. data/lib/langchain/assistants/messages/mistral_ai_message.rb +0 -96
  37. data/lib/langchain/assistants/messages/ollama_message.rb +0 -74
  38. data/lib/langchain/assistants/messages/openai_message.rb +0 -103
data/lib/langchain/assistants/messages/ollama_message.rb
@@ -1,74 +0,0 @@
-# frozen_string_literal: true
-
-module Langchain
-  module Messages
-    class OllamaMessage < Base
-      # Ollama supports the following roles:
-      ROLES = [
-        "system",
-        "assistant",
-        "user",
-        "tool"
-      ].freeze
-
-      TOOL_ROLE = "tool"
-
-      # Initialize a new Ollama message
-      #
-      # @param role [String] The role of the message
-      # @param content [String] The content of the message
-      # @param tool_calls [Array<Hash>] The tool calls made in the message
-      # @param tool_call_id [String] The ID of the tool call
-      def initialize(role:, content: nil, tool_calls: [], tool_call_id: nil)
-        raise ArgumentError, "Role must be one of #{ROLES.join(", ")}" unless ROLES.include?(role)
-        raise ArgumentError, "Tool calls must be an array of hashes" unless tool_calls.is_a?(Array) && tool_calls.all? { |tool_call| tool_call.is_a?(Hash) }
-
-        @role = role
-        # Some Tools return content as JSON, hence `.to_s`
-        @content = content.to_s
-        @tool_calls = tool_calls
-        @tool_call_id = tool_call_id
-      end
-
-      # Convert the message to an Ollama API-compatible hash
-      #
-      # @return [Hash] The message as an Ollama API-compatible hash
-      def to_hash
-        {}.tap do |h|
-          h[:role] = role
-          h[:content] = content if content # Content is nil for tool calls
-          h[:tool_calls] = tool_calls if tool_calls.any?
-          h[:tool_call_id] = tool_call_id if tool_call_id
-        end
-      end
-
-      # Check if the message came from an LLM
-      #
-      # @return [Boolean] true/false whether this message was produced by an LLM
-      def llm?
-        assistant?
-      end
-
-      # Check if the message came from an LLM
-      #
-      # @return [Boolean] true/false whether this message was produced by an LLM
-      def assistant?
-        role == "assistant"
-      end
-
-      # Check if the message contains system instructions
-      #
-      # @return [Boolean] true/false whether this message contains system instructions
-      def system?
-        role == "system"
-      end
-
-      # Check if the message is a tool call
-      #
-      # @return [Boolean] true/false whether this message is a tool call
-      def tool?
-        role == "tool"
-      end
-    end
-  end
-end
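For reference, here is a minimal usage sketch of the removed OllamaMessage class as it behaved in 0.17.1. The roles, keyword arguments, and `to_hash` output follow the diff above; the content strings and `tool_call_id` value are made-up illustration data, and since 0.18.0 moves this class under data/lib/langchain/assistant/messages/ollama_message.rb (file 15 above), the constant path shown applies to the old version only.

```ruby
require "langchain"

# An assistant reply (langchainrb 0.17.1 constant path).
msg = Langchain::Messages::OllamaMessage.new(
  role: "assistant",
  content: "It is sunny today." # illustrative content
)
msg.llm?     #=> true, since llm? delegates to assistant?
msg.to_hash  #=> {role: "assistant", content: "It is sunny today."}

# A tool-result message: tool_call_id is included, tool_calls stays empty.
tool_msg = Langchain::Messages::OllamaMessage.new(
  role: "tool",
  content: "{\"temperature\": 21}", # Tools may return JSON; it is coerced with `.to_s`
  tool_call_id: "call_123"          # hypothetical ID
)
tool_msg.to_hash
#=> {role: "tool", content: "{\"temperature\": 21}", tool_call_id: "call_123"}
```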
data/lib/langchain/assistants/messages/openai_message.rb
@@ -1,103 +0,0 @@
-# frozen_string_literal: true
-
-module Langchain
-  module Messages
-    class OpenAIMessage < Base
-      # OpenAI uses the following roles:
-      ROLES = [
-        "system",
-        "assistant",
-        "user",
-        "tool"
-      ].freeze
-
-      TOOL_ROLE = "tool"
-
-      # Initialize a new OpenAI message
-      #
-      # @param role [String] The role of the message
-      # @param content [String] The content of the message
-      # @param image_url [String] The URL of the image
-      # @param tool_calls [Array<Hash>] The tool calls made in the message
-      # @param tool_call_id [String] The ID of the tool call
-      def initialize(
-        role:,
-        content: nil,
-        image_url: nil,
-        tool_calls: [],
-        tool_call_id: nil
-      )
-        raise ArgumentError, "Role must be one of #{ROLES.join(", ")}" unless ROLES.include?(role)
-        raise ArgumentError, "Tool calls must be an array of hashes" unless tool_calls.is_a?(Array) && tool_calls.all? { |tool_call| tool_call.is_a?(Hash) }
-
-        @role = role
-        # Some Tools return content as JSON, hence `.to_s`
-        @content = content.to_s
-        @image_url = image_url
-        @tool_calls = tool_calls
-        @tool_call_id = tool_call_id
-      end
-
-      # Check if the message came from an LLM
-      #
-      # @return [Boolean] true/false whether this message was produced by an LLM
-      def llm?
-        assistant?
-      end
-
-      # Convert the message to an OpenAI API-compatible hash
-      #
-      # @return [Hash] The message as an OpenAI API-compatible hash
-      def to_hash
-        {}.tap do |h|
-          h[:role] = role
-
-          if tool_calls.any?
-            h[:tool_calls] = tool_calls
-          else
-            h[:tool_call_id] = tool_call_id if tool_call_id
-
-            h[:content] = []
-
-            if content && !content.empty?
-              h[:content] << {
-                type: "text",
-                text: content
-              }
-            end
-
-            if image_url
-              h[:content] << {
-                type: "image_url",
-                image_url: {
-                  url: image_url
-                }
-              }
-            end
-          end
-        end
-      end
-
-      # Check if the message came from an LLM
-      #
-      # @return [Boolean] true/false whether this message was produced by an LLM
-      def assistant?
-        role == "assistant"
-      end
-
-      # Check if the message contains system instructions
-      #
-      # @return [Boolean] true/false whether this message contains system instructions
-      def system?
-        role == "system"
-      end
-
-      # Check if the message is a tool call
-      #
-      # @return [Boolean] true/false whether this message is a tool call
-      def tool?
-        role == "tool"
-      end
-    end
-  end
-end
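A similar sketch for the removed OpenAIMessage class (relocated in 0.18.0 to data/lib/langchain/assistant/messages/openai_message.rb, file 16 above): unlike the Ollama variant, `to_hash` wraps text and image content in an array of content parts, and a message carrying tool_calls emits only role and tool_calls. The constant path is the 0.17.1 one; the URL and the tool-call hash are hypothetical example data.

```ruby
require "langchain"

# A multimodal user message: text and image_url become content parts.
msg = Langchain::Messages::OpenAIMessage.new(
  role: "user",
  content: "What is in this image?",
  image_url: "https://example.com/cat.png" # hypothetical URL
)
msg.to_hash
#=> {role: "user",
#    content: [
#      {type: "text", text: "What is in this image?"},
#      {type: "image_url", image_url: {url: "https://example.com/cat.png"}}
#    ]}

# An assistant message with tool calls: the content-parts branch is skipped entirely.
tool_call = {
  "id" => "call_abc", # hypothetical tool call
  "type" => "function",
  "function" => {"name" => "get_weather", "arguments" => "{}"}
}
assistant_msg = Langchain::Messages::OpenAIMessage.new(role: "assistant", tool_calls: [tool_call])
assistant_msg.to_hash #=> {role: "assistant", tool_calls: [tool_call]}
```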