langchainrb 0.18.0 → 0.19.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +20 -0
- data/README.md +2 -2
- data/lib/langchain/assistant/messages/anthropic_message.rb +6 -0
- data/lib/langchain/assistant/messages/google_gemini_message.rb +4 -4
- data/lib/langchain/assistant/messages/mistral_ai_message.rb +69 -24
- data/lib/langchain/assistant/messages/ollama_message.rb +4 -4
- data/lib/langchain/assistant.rb +2 -1
- data/lib/langchain/llm/anthropic.rb +5 -5
- data/lib/langchain/llm/aws_bedrock.rb +10 -10
- data/lib/langchain/llm/azure.rb +1 -1
- data/lib/langchain/llm/base.rb +1 -1
- data/lib/langchain/llm/cohere.rb +8 -8
- data/lib/langchain/llm/google_gemini.rb +5 -6
- data/lib/langchain/llm/google_vertex_ai.rb +6 -5
- data/lib/langchain/llm/hugging_face.rb +4 -4
- data/lib/langchain/llm/mistral_ai.rb +4 -4
- data/lib/langchain/llm/ollama.rb +7 -7
- data/lib/langchain/llm/openai.rb +6 -5
- data/lib/langchain/llm/replicate.rb +6 -6
- data/lib/langchain/tool_definition.rb +7 -0
- data/lib/langchain/version.rb +1 -1
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 89fdd6c82d689f6e7057133eab08ef8367e72e211d3a59df423ce26cd3b2c04d
+  data.tar.gz: e77493aec62198a014b0296490779b18016407c1808de45020e25b63a8d42d17
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 9040755869f6cbf666f8ec1225b976d095a809915c533e8102b281bc8c66d4751e93abbef19a4c27d806e5abd9555690a4d89401e7d06c29d8e4e9b6252bff4f
+  data.tar.gz: 23287eb713d76ed824e13e4b2e07e66f342a08177b39b7fa2d6fb85af25f3f0ed2bc898d6b1316c2859963526f7cf1e635c413a8786ae671399ff50b87697f82
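These checksums cover the `metadata.gz` and `data.tar.gz` entries inside the `.gem` archive, not the `.gem` file itself. A minimal sketch of recomputing them locally with Ruby's standard library, assuming you have downloaded the gem (the local filename is illustrative):

```ruby
require "digest"
require "rubygems/package"

# A .gem file is a tar archive whose entries include metadata.gz and
# data.tar.gz; checksums.yaml records digests for those inner entries.
File.open("langchainrb-0.19.0.gem", "rb") do |io|
  Gem::Package::TarReader.new(io) do |tar|
    tar.each do |entry|
      next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)

      # Compare against the SHA256 lines in the diff above
      puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
    end
  end
end
```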
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,25 @@
+# CHANGELOG
+
+## Key
+- [BREAKING]: A breaking change. After an upgrade, your app may need modifications to keep working correctly.
+- [FEATURE]: A non-breaking improvement to the app. Either introduces new functionality, or improves on an existing feature.
+- [BUGFIX]: Fixes a bug with a non-breaking change.
+- [COMPAT]: Compatibility improvements - changes to make Administrate more compatible with different dependency versions.
+- [OPTIM]: Optimization or performance increase.
+- [DOCS]: Documentation changes. No changes to the library's behavior.
+- [SECURITY]: A change which fixes a security vulnerability.
+
 ## [Unreleased]
 
+## [0.19.0] - 2024-10-23
+- [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/840] Rename `chat_completion_model_name` parameter to `chat_model` in Langchain::LLM parameters.
+- [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/840] Rename `completion_model_name` parameter to `completion_model` in Langchain::LLM parameters.
+- [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/840] Rename `embeddings_model_name` parameter to `embedding_model` in Langchain::LLM parameters.
+- [BUGFIX] [https://github.com/patterns-ai-core/langchainrb/pull/850/] Fix MistralAIMessage to handle "Tool" Output
+- [BUGFIX] [https://github.com/patterns-ai-core/langchainrb/pull/837] Fix bug when tool functions with no input variables are used with Langchain::LLM::Anthropic
+- [BUGFIX] [https://github.com/patterns-ai-core/langchainrb/pull/836] Fix bug when assistant.instructions = nil did not remove the system message
+- [FEATURE] [https://github.com/patterns-ai-core/langchainrb/pull/838] Allow setting safety_settings: [] in default_options for Langchain::LLM::GoogleGemini and Langchain::LLM::GoogleVertexAI constructors
+
 ## [0.18.0] - 2024-10-12
 - [BREAKING] Remove `Langchain::Assistant#clear_thread!` method
 - [BREAKING] `Langchain::Messages::*` namespace had migrated to `Langchain::Assistant::Messages::*`
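For apps upgrading across the three [BREAKING] renames above, the change is confined to the keys passed in `default_options` (and any `DEFAULTS` overrides). A hedged before/after sketch using the OpenAI class; the model names are illustrative:

```ruby
require "langchain"

# 0.18.x and earlier (old keys, removed in 0.19.0):
# llm = Langchain::LLM::OpenAI.new(
#   api_key: ENV["OPENAI_API_KEY"],
#   default_options: {
#     chat_completion_model_name: "gpt-4o",
#     embeddings_model_name: "text-embedding-3-small"
#   }
# )

# 0.19.0 (renamed keys):
llm = Langchain::LLM::OpenAI.new(
  api_key: ENV["OPENAI_API_KEY"],
  default_options: {
    chat_model: "gpt-4o",                      # was chat_completion_model_name
    embedding_model: "text-embedding-3-small"  # was embeddings_model_name
  }
)
```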
data/README.md
CHANGED
@@ -86,7 +86,7 @@ Most LLM classes can be initialized with an API key and optional default options
 ```ruby
 llm = Langchain::LLM::OpenAI.new(
   api_key: ENV["OPENAI_API_KEY"],
-  default_options: { temperature: 0.7,
+  default_options: { temperature: 0.7, chat_model: "gpt-4o" }
 )
 ```
 
@@ -505,7 +505,7 @@ assistant.add_message_and_run!(content: "What's the latest news about AI?")
 # Supply an image to the assistant
 assistant.add_message_and_run!(
   content: "Show me a picture of a cat",
-
+  image_url: "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
 )
 
 # Access the conversation thread
data/lib/langchain/assistant/messages/anthropic_message.rb
CHANGED
@@ -12,6 +12,12 @@ module Langchain
 
       TOOL_ROLE = "tool_result"
 
+      # Initialize a new Anthropic message
+      #
+      # @param role [String] The role of the message
+      # @param content [String] The content of the message
+      # @param tool_calls [Array<Hash>] The tool calls made in the message
+      # @param tool_call_id [String] The ID of the tool call
       def initialize(role:, content: nil, tool_calls: [], tool_call_id: nil)
         raise ArgumentError, "Role must be one of #{ROLES.join(", ")}" unless ROLES.include?(role)
         raise ArgumentError, "Tool calls must be an array of hashes" unless tool_calls.is_a?(Array) && tool_calls.all? { |tool_call| tool_call.is_a?(Hash) }
data/lib/langchain/assistant/messages/google_gemini_message.rb
CHANGED
@@ -15,10 +15,10 @@ module Langchain
 
       # Initialize a new Google Gemini message
       #
-      # @param [String] The role of the message
-      # @param [String] The content of the message
-      # @param [Array<Hash>] The tool calls made in the message
-      # @param [String] The ID of the tool call
+      # @param role [String] The role of the message
+      # @param content [String] The content of the message
+      # @param tool_calls [Array<Hash>] The tool calls made in the message
+      # @param tool_call_id [String] The ID of the tool call
       def initialize(role:, content: nil, tool_calls: [], tool_call_id: nil)
         raise ArgumentError, "Role must be one of #{ROLES.join(", ")}" unless ROLES.include?(role)
         raise ArgumentError, "Tool calls must be an array of hashes" unless tool_calls.is_a?(Array) && tool_calls.all? { |tool_call| tool_call.is_a?(Hash) }
data/lib/langchain/assistant/messages/mistral_ai_message.rb
CHANGED
@@ -45,30 +45,14 @@ module Langchain
         #
         # @return [Hash] The message as an MistralAI API-compatible hash
         def to_hash
-
-
-
-
-
-
-
-
-          h[:content] = []
-
-          if content && !content.empty?
-            h[:content] << {
-              type: "text",
-              text: content
-            }
-          end
-
-          if image_url
-            h[:content] << {
-              type: "image_url",
-              image_url: image_url
-            }
-          end
-        end
+          if assistant?
+            assistant_hash
+          elsif system?
+            system_hash
+          elsif tool?
+            tool_hash
+          elsif user?
+            user_hash
           end
         end
 
@@ -92,6 +76,67 @@ module Langchain
         def tool?
           role == "tool"
         end
+
+        # Convert the message to an MistralAI API-compatible hash
+        # @return [Hash] The message as an MistralAI API-compatible hash, with the role as "assistant"
+        def assistant_hash
+          {
+            role: "assistant",
+            content: content,
+            tool_calls: tool_calls,
+            prefix: false
+          }
+        end
+
+        # Convert the message to an MistralAI API-compatible hash
+        # @return [Hash] The message as an MistralAI API-compatible hash, with the role as "system"
+        def system_hash
+          {
+            role: "system",
+            content: build_content_array
+          }
+        end
+
+        # Convert the message to an MistralAI API-compatible hash
+        # @return [Hash] The message as an MistralAI API-compatible hash, with the role as "tool"
+        def tool_hash
+          {
+            role: "tool",
+            content: content,
+            tool_call_id: tool_call_id
+          }
+        end
+
+        # Convert the message to an MistralAI API-compatible hash
+        # @return [Hash] The message as an MistralAI API-compatible hash, with the role as "user"
+        def user_hash
+          {
+            role: "user",
+            content: build_content_array
+          }
+        end
+
+        # Builds the content value for the message hash
+        # @return [Array<Hash>] An array of content hashes, with keys :type and :text or :image_url.
+        def build_content_array
+          content_details = []
+
+          if content && !content.empty?
+            content_details << {
+              type: "text",
+              text: content
+            }
+          end
+
+          if image_url
+            content_details << {
+              type: "image_url",
+              image_url: image_url
+            }
+          end
+
+          content_details
+        end
       end
     end
   end
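The net effect of the two hunks above: `to_hash` now dispatches on role, and only system and user messages wrap their content in the text/image_url array built by `build_content_array`. A sketch of the resulting shapes; the constructed values are illustrative:

```ruby
require "langchain"

user_msg = Langchain::Assistant::Messages::MistralAIMessage.new(
  role: "user",
  content: "Describe this image"
)
user_msg.to_hash
# => {role: "user", content: [{type: "text", text: "Describe this image"}]}

# Tool results now pass through tool_hash, the "Tool" output fix from PR 850
tool_msg = Langchain::Assistant::Messages::MistralAIMessage.new(
  role: "tool",
  content: "42",
  tool_call_id: "abc123"
)
tool_msg.to_hash
# => {role: "tool", content: "42", tool_call_id: "abc123"}
```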
data/lib/langchain/assistant/messages/ollama_message.rb
CHANGED
@@ -16,10 +16,10 @@ module Langchain
 
       # Initialize a new OpenAI message
       #
-      # @param [String] The role of the message
-      # @param [String] The content of the message
-      # @param [Array<Hash>] The tool calls made in the message
-      # @param [String] The ID of the tool call
+      # @param role [String] The role of the message
+      # @param content [String] The content of the message
+      # @param tool_calls [Array<Hash>] The tool calls made in the message
+      # @param tool_call_id [String] The ID of the tool call
       def initialize(role:, content: nil, tool_calls: [], tool_call_id: nil)
         raise ArgumentError, "Role must be one of #{ROLES.join(", ")}" unless ROLES.include?(role)
         raise ArgumentError, "Tool calls must be an array of hashes" unless tool_calls.is_a?(Array) && tool_calls.all? { |tool_call| tool_call.is_a?(Hash) }
data/lib/langchain/assistant.rb
CHANGED
@@ -196,7 +196,7 @@ module Langchain
 
       if @llm_adapter.support_system_message?
         # TODO: Should we still set a system message even if @instructions is "" or nil?
-        replace_system_message!(content: new_instructions)
+        replace_system_message!(content: new_instructions)
       end
     end
 
@@ -217,6 +217,7 @@ module Langchain
     # @return [Array<Langchain::Message>] The messages
     def replace_system_message!(content:)
       messages.delete_if(&:system?)
+      return if content.nil?
 
       message = build_message(role: "system", content: content)
       messages.unshift(message)
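The new `return if content.nil?` guard is what makes `assistant.instructions = nil` actually drop the system message (the [BUGFIX] from PR 836) instead of unshifting an empty one. A sketch, assuming an LLM adapter that supports system messages:

```ruby
assistant = Langchain::Assistant.new(
  llm: llm,  # e.g. the Langchain::LLM::OpenAI instance from the README example
  instructions: "You are a helpful assistant"
)
assistant.messages.first.system?  # => true

# Before 0.19.0 this left an empty system message at the head of the thread;
# now replace_system_message! deletes it and returns early.
assistant.instructions = nil
assistant.messages.none?(&:system?)  # => true
```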
data/lib/langchain/llm/anthropic.rb
CHANGED
@@ -13,8 +13,8 @@ module Langchain::LLM
   class Anthropic < Base
     DEFAULTS = {
       temperature: 0.0,
-
-
+      completion_model: "claude-2.1",
+      chat_model: "claude-3-5-sonnet-20240620",
       max_tokens_to_sample: 256
     }.freeze
 
@@ -22,7 +22,7 @@ module Langchain::LLM
     #
     # @param api_key [String] The API key to use
     # @param llm_options [Hash] Options to pass to the Anthropic client
-    # @param default_options [Hash] Default options to use on every call to LLM, e.g.: { temperature:,
+    # @param default_options [Hash] Default options to use on every call to LLM, e.g.: { temperature:, completion_model:, chat_model:, max_tokens_to_sample: }
     # @return [Langchain::LLM::Anthropic] Langchain::LLM::Anthropic instance
     def initialize(api_key:, llm_options: {}, default_options: {})
       depends_on "anthropic"
@@ -30,7 +30,7 @@ module Langchain::LLM
       @client = ::Anthropic::Client.new(access_token: api_key, **llm_options)
       @defaults = DEFAULTS.merge(default_options)
       chat_parameters.update(
-        model: {default: @defaults[:
+        model: {default: @defaults[:chat_model]},
         temperature: {default: @defaults[:temperature]},
         max_tokens: {default: @defaults[:max_tokens_to_sample]},
         metadata: {},
@@ -54,7 +54,7 @@ module Langchain::LLM
     # @return [Langchain::LLM::AnthropicResponse] The completion
     def complete(
       prompt:,
-      model: @defaults[:
+      model: @defaults[:completion_model],
       max_tokens_to_sample: @defaults[:max_tokens_to_sample],
       stop_sequences: nil,
       temperature: @defaults[:temperature],
data/lib/langchain/llm/aws_bedrock.rb
CHANGED
@@ -11,9 +11,9 @@ module Langchain::LLM
   #
   class AwsBedrock < Base
     DEFAULTS = {
-
-
-
+      chat_model: "anthropic.claude-v2",
+      completion_model: "anthropic.claude-v2",
+      embedding_model: "amazon.titan-embed-text-v1",
       max_tokens_to_sample: 300,
       temperature: 1,
       top_k: 250,
@@ -60,7 +60,7 @@ module Langchain::LLM
       @defaults = DEFAULTS.merge(default_options)
 
       chat_parameters.update(
-        model: {default: @defaults[:
+        model: {default: @defaults[:chat_model]},
         temperature: {},
         max_tokens: {default: @defaults[:max_tokens_to_sample]},
         metadata: {},
@@ -84,7 +84,7 @@ module Langchain::LLM
       parameters = compose_embedding_parameters params.merge(text:)
 
       response = client.invoke_model({
-        model_id: @defaults[:
+        model_id: @defaults[:embedding_model],
         body: parameters.to_json,
         content_type: "application/json",
         accept: "application/json"
@@ -103,14 +103,14 @@ module Langchain::LLM
     def complete(prompt:, **params)
       raise "Completion provider #{completion_provider} is not supported." unless SUPPORTED_COMPLETION_PROVIDERS.include?(completion_provider)
 
-      raise "Model #{@defaults[:
+      raise "Model #{@defaults[:completion_model]} only supports #chat." if @defaults[:completion_model].include?("claude-3")
 
       parameters = compose_parameters params
 
       parameters[:prompt] = wrap_prompt prompt
 
       response = client.invoke_model({
-        model_id: @defaults[:
+        model_id: @defaults[:completion_model],
         body: parameters.to_json,
         content_type: "application/json",
         accept: "application/json"
@@ -126,7 +126,7 @@ module Langchain::LLM
     # @param [Hash] params unified chat parmeters from [Langchain::LLM::Parameters::Chat::SCHEMA]
     # @option params [Array<String>] :messages The messages to generate a completion for
     # @option params [String] :system The system prompt to provide instructions
-    # @option params [String] :model The model to use for completion defaults to @defaults[:
+    # @option params [String] :model The model to use for completion defaults to @defaults[:chat_model]
     # @option params [Integer] :max_tokens The maximum number of tokens to generate defaults to @defaults[:max_tokens_to_sample]
     # @option params [Array<String>] :stop The stop sequences to use for completion
     # @option params [Array<String>] :stop_sequences The stop sequences to use for completion
@@ -175,11 +175,11 @@ module Langchain::LLM
     private
 
     def completion_provider
-      @defaults[:
+      @defaults[:completion_model].split(".").first.to_sym
     end
 
     def embedding_provider
-      @defaults[:
+      @defaults[:embedding_model].split(".").first.to_sym
     end
 
     def wrap_prompt(prompt)
data/lib/langchain/llm/azure.rb
CHANGED
@@ -33,7 +33,7 @@ module Langchain::LLM
       )
       @defaults = DEFAULTS.merge(default_options)
       chat_parameters.update(
-        model: {default: @defaults[:
+        model: {default: @defaults[:chat_model]},
         logprobs: {},
         top_logprobs: {},
         n: {default: @defaults[:n]},
data/lib/langchain/llm/base.rb
CHANGED
@@ -34,7 +34,7 @@ module Langchain::LLM
       default_dimensions
     end
 
-    # Returns the number of vector dimensions used by DEFAULTS[:
+    # Returns the number of vector dimensions used by DEFAULTS[:chat_model]
     #
     # @return [Integer] Vector dimensions
     def default_dimensions
data/lib/langchain/llm/cohere.rb
CHANGED
@@ -13,9 +13,9 @@ module Langchain::LLM
   class Cohere < Base
     DEFAULTS = {
       temperature: 0.0,
-
-
-
+      completion_model: "command",
+      chat_model: "command-r-plus",
+      embedding_model: "small",
       dimensions: 1024,
       truncate: "START"
     }.freeze
@@ -26,7 +26,7 @@ module Langchain::LLM
       @client = ::Cohere::Client.new(api_key: api_key)
       @defaults = DEFAULTS.merge(default_options)
       chat_parameters.update(
-        model: {default: @defaults[:
+        model: {default: @defaults[:chat_model]},
         temperature: {default: @defaults[:temperature]},
         response_format: {default: @defaults[:response_format]}
       )
@@ -48,10 +48,10 @@ module Langchain::LLM
     def embed(text:)
       response = client.embed(
         texts: [text],
-        model: @defaults[:
+        model: @defaults[:embedding_model]
       )
 
-      Langchain::LLM::CohereResponse.new response, model: @defaults[:
+      Langchain::LLM::CohereResponse.new response, model: @defaults[:embedding_model]
     end
 
     #
@@ -65,7 +65,7 @@ module Langchain::LLM
       default_params = {
         prompt: prompt,
         temperature: @defaults[:temperature],
-        model: @defaults[:
+        model: @defaults[:completion_model],
         truncate: @defaults[:truncate]
       }
 
@@ -76,7 +76,7 @@ module Langchain::LLM
       default_params.merge!(params)
 
       response = client.generate(**default_params)
-      Langchain::LLM::CohereResponse.new response, model: @defaults[:
+      Langchain::LLM::CohereResponse.new response, model: @defaults[:completion_model]
     end
 
     # Generate a chat completion for given messages
data/lib/langchain/llm/google_gemini.rb
CHANGED
@@ -5,8 +5,8 @@ module Langchain::LLM
   # llm = Langchain::LLM::GoogleGemini.new(api_key: ENV['GOOGLE_GEMINI_API_KEY'])
   class GoogleGemini < Base
     DEFAULTS = {
-
-
+      chat_model: "gemini-1.5-pro-latest",
+      embedding_model: "text-embedding-004",
       temperature: 0.0
     }
 
@@ -17,10 +17,10 @@ module Langchain::LLM
       @defaults = DEFAULTS.merge(default_options)
 
       chat_parameters.update(
-        model: {default: @defaults[:
+        model: {default: @defaults[:chat_model]},
         temperature: {default: @defaults[:temperature]},
         generation_config: {default: nil},
-        safety_settings: {default:
+        safety_settings: {default: @defaults[:safety_settings]}
       )
       chat_parameters.remap(
         messages: :contents,
@@ -72,9 +72,8 @@ module Langchain::LLM
 
     def embed(
       text:,
-      model: @defaults[:
+      model: @defaults[:embedding_model]
     )
-
       params = {
         content: {
           parts: [
data/lib/langchain/llm/google_vertex_ai.rb
CHANGED
@@ -17,8 +17,8 @@ module Langchain::LLM
       top_p: 0.8,
       top_k: 40,
       dimensions: 768,
-
-
+      embedding_model: "textembedding-gecko",
+      chat_model: "gemini-1.0-pro"
     }.freeze
 
     # Google Cloud has a project id and a specific region of deployment.
@@ -38,8 +38,9 @@ module Langchain::LLM
       @defaults = DEFAULTS.merge(default_options)
 
       chat_parameters.update(
-        model: {default: @defaults[:
-        temperature: {default: @defaults[:temperature]}
+        model: {default: @defaults[:chat_model]},
+        temperature: {default: @defaults[:temperature]},
+        safety_settings: {default: @defaults[:safety_settings]}
       )
       chat_parameters.remap(
         messages: :contents,
@@ -57,7 +58,7 @@ module Langchain::LLM
     #
     def embed(
       text:,
-      model: @defaults[:
+      model: @defaults[:embedding_model]
     )
       params = {instances: [{content: text}]}
 
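Registering `safety_settings: {default: @defaults[:safety_settings]}` in both Google classes is what enables the [FEATURE] entry in the changelog: a `safety_settings` value, including an explicit empty array, can now be supplied once in `default_options` and flows through `chat_parameters` on every call. A sketch; the category and threshold strings are standard Gemini API values, shown for illustration:

```ruby
require "langchain"

# Customize safety filtering for every chat call
llm = Langchain::LLM::GoogleGemini.new(
  api_key: ENV["GOOGLE_GEMINI_API_KEY"],
  default_options: {
    safety_settings: [
      {category: "HARM_CATEGORY_DANGEROUS_CONTENT", threshold: "BLOCK_ONLY_HIGH"}
    ]
  }
)

# Or pass an explicit empty array, which previously was not honored
llm = Langchain::LLM::GoogleGemini.new(
  api_key: ENV["GOOGLE_GEMINI_API_KEY"],
  default_options: {safety_settings: []}
)
```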
data/lib/langchain/llm/hugging_face.rb
CHANGED
@@ -12,7 +12,7 @@ module Langchain::LLM
   #
   class HuggingFace < Base
     DEFAULTS = {
-
+      embedding_model: "sentence-transformers/all-MiniLM-L6-v2"
     }.freeze
 
     EMBEDDING_SIZES = {
@@ -36,7 +36,7 @@ module Langchain::LLM
     def default_dimensions
       # since Huggin Face can run multiple models, look it up or generate an embedding and return the size
       @default_dimensions ||= @defaults[:dimensions] ||
-        EMBEDDING_SIZES.fetch(@defaults[:
+        EMBEDDING_SIZES.fetch(@defaults[:embedding_model].to_sym) do
           embed(text: "test").embedding.size
         end
     end
@@ -50,9 +50,9 @@ module Langchain::LLM
     def embed(text:)
       response = client.embedding(
         input: text,
-        model: @defaults[:
+        model: @defaults[:embedding_model]
       )
-      Langchain::LLM::HuggingFaceResponse.new(response, model: @defaults[:
+      Langchain::LLM::HuggingFaceResponse.new(response, model: @defaults[:embedding_model])
     end
   end
 end
data/lib/langchain/llm/mistral_ai.rb
CHANGED
@@ -8,8 +8,8 @@ module Langchain::LLM
   # llm = Langchain::LLM::MistralAI.new(api_key: ENV["MISTRAL_AI_API_KEY"])
   class MistralAI < Base
     DEFAULTS = {
-
-
+      chat_model: "mistral-large-latest",
+      embedding_model: "mistral-embed"
     }.freeze
 
     attr_reader :defaults
@@ -24,7 +24,7 @@ module Langchain::LLM
 
       @defaults = DEFAULTS.merge(default_options)
       chat_parameters.update(
-        model: {default: @defaults[:
+        model: {default: @defaults[:chat_model]},
         n: {default: @defaults[:n]},
         safe_prompt: {},
         temperature: {default: @defaults[:temperature]},
@@ -44,7 +44,7 @@ module Langchain::LLM
 
     def embed(
       text:,
-      model: defaults[:
+      model: defaults[:embedding_model],
       encoding_format: nil
     )
       params = {
data/lib/langchain/llm/ollama.rb
CHANGED
@@ -12,9 +12,9 @@ module Langchain::LLM
 
     DEFAULTS = {
       temperature: 0.0,
-
-
-
+      completion_model: "llama3.1",
+      embedding_model: "llama3.1",
+      chat_model: "llama3.1"
     }.freeze
 
     EMBEDDING_SIZES = {
@@ -41,7 +41,7 @@ module Langchain::LLM
       @api_key = api_key
       @defaults = DEFAULTS.merge(default_options)
       chat_parameters.update(
-        model: {default: @defaults[:
+        model: {default: @defaults[:chat_model]},
         temperature: {default: @defaults[:temperature]},
         template: {},
         stream: {default: false},
@@ -55,7 +55,7 @@ module Langchain::LLM
     def default_dimensions
       # since Ollama can run multiple models, look it up or generate an embedding and return the size
       @default_dimensions ||=
-        EMBEDDING_SIZES.fetch(defaults[:
+        EMBEDDING_SIZES.fetch(defaults[:embedding_model].to_sym) do
           embed(text: "test").embedding.size
         end
     end
@@ -77,7 +77,7 @@ module Langchain::LLM
     #
     def complete(
       prompt:,
-      model: defaults[:
+      model: defaults[:completion_model],
       images: nil,
       format: nil,
       system: nil,
@@ -199,7 +199,7 @@ module Langchain::LLM
     #
     def embed(
       text:,
-      model: defaults[:
+      model: defaults[:embedding_model],
       mirostat: nil,
       mirostat_eta: nil,
       mirostat_tau: nil,
data/lib/langchain/llm/openai.rb
CHANGED
@@ -16,8 +16,8 @@ module Langchain::LLM
     DEFAULTS = {
       n: 1,
       temperature: 0.0,
-
-
+      chat_model: "gpt-4o-mini",
+      embedding_model: "text-embedding-3-small"
     }.freeze
 
     EMBEDDING_SIZES = {
@@ -41,7 +41,7 @@ module Langchain::LLM
 
       @defaults = DEFAULTS.merge(default_options)
       chat_parameters.update(
-        model: {default: @defaults[:
+        model: {default: @defaults[:chat_model]},
         logprobs: {},
         top_logprobs: {},
         n: {default: @defaults[:n]},
@@ -61,7 +61,7 @@ module Langchain::LLM
     # @return [Langchain::LLM::OpenAIResponse] Response object
     def embed(
       text:,
-      model: defaults[:
+      model: defaults[:embedding_model],
       encoding_format: nil,
       user: nil,
       dimensions: @defaults[:dimensions]
@@ -109,6 +109,7 @@ module Langchain::LLM
       messages = [{role: "user", content: prompt}]
       chat(messages: messages, **params)
     end
+
     # rubocop:enable Style/ArgumentsForwarding
 
     # Generate a chat completion for given messages.
@@ -159,7 +160,7 @@ module Langchain::LLM
     end
 
     def default_dimensions
-      @defaults[:dimensions] || EMBEDDING_SIZES.fetch(defaults[:
+      @defaults[:dimensions] || EMBEDDING_SIZES.fetch(defaults[:embedding_model])
     end
 
     private
data/lib/langchain/llm/replicate.rb
CHANGED
@@ -14,8 +14,8 @@ module Langchain::LLM
       # TODO: Figure out how to send the temperature to the API
       temperature: 0.01, # Minimum accepted value
       # TODO: Design the interface to pass and use different models
-
-
+      completion_model: "replicate/vicuna-13b",
+      embedding_model: "creatorrr/all-mpnet-base-v2",
       dimensions: 384
     }.freeze
 
@@ -49,7 +49,7 @@ module Langchain::LLM
         sleep(0.1)
       end
 
-      Langchain::LLM::ReplicateResponse.new(response, model: @defaults[:
+      Langchain::LLM::ReplicateResponse.new(response, model: @defaults[:embedding_model])
     end
 
     #
@@ -66,7 +66,7 @@ module Langchain::LLM
         sleep(0.1)
       end
 
-      Langchain::LLM::ReplicateResponse.new(response, model: @defaults[:
+      Langchain::LLM::ReplicateResponse.new(response, model: @defaults[:completion_model])
     end
 
     #
@@ -94,11 +94,11 @@ module Langchain::LLM
     private
 
     def completion_model
-      @completion_model ||= client.retrieve_model(@defaults[:
+      @completion_model ||= client.retrieve_model(@defaults[:completion_model]).latest_version
     end
 
     def embeddings_model
-      @embeddings_model ||= client.retrieve_model(@defaults[:
+      @embeddings_model ||= client.retrieve_model(@defaults[:embedding_model]).latest_version
     end
   end
 end
data/lib/langchain/tool_definition.rb
CHANGED
@@ -103,6 +103,13 @@ module Langchain::ToolDefinition
     # @return [String] JSON string of schemas in Anthropic format
     def to_anthropic_format
       @schemas.values.map do |schema|
+        # Adds a default input_schema if no parameters are present
+        schema[:function][:parameters] ||= {
+          type: "object",
+          properties: {},
+          required: []
+        }
+
         schema[:function].transform_keys(parameters: :input_schema)
       end
     end
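This default schema is the fix for tool functions that declare no input variables (PR 837): Anthropic's tool-use API expects every tool to carry an `input_schema`, so parameterless functions now serialize with an empty object schema instead of omitting the key. A hypothetical tool for illustration; the `clock__current_time` name follows ToolDefinition's tool__method naming convention:

```ruby
require "time"
require "langchain"

class Clock
  extend Langchain::ToolDefinition

  # No `property` declarations, so this function has no parameters
  define_function :current_time, description: "Returns the current UTC time"

  def current_time
    Time.now.utc.iso8601
  end
end

Clock.function_schemas.to_anthropic_format
# => [{name: "clock__current_time",
#      description: "Returns the current UTC time",
#      input_schema: {type: "object", properties: {}, required: []}}]
```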
data/lib/langchain/version.rb
CHANGED
-  VERSION = "0.18.0"
+  VERSION = "0.19.0"
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: langchainrb
 version: !ruby/object:Gem::Version
-  version: 0.18.0
+  version: 0.19.0
 platform: ruby
 authors:
 - Andrei Bondarev
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-10-
+date: 2024-10-23 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: baran
|