langchainrb 0.18.0 → 0.19.1

Files changed (32)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +30 -0
  3. data/README.md +4 -4
  4. data/lib/langchain/assistant/llm/adapter.rb +7 -6
  5. data/lib/langchain/assistant/llm/adapters/anthropic.rb +1 -3
  6. data/lib/langchain/assistant/llm/adapters/aws_bedrock_anthropic.rb +35 -0
  7. data/lib/langchain/assistant/llm/adapters/ollama.rb +1 -3
  8. data/lib/langchain/assistant/messages/anthropic_message.rb +89 -17
  9. data/lib/langchain/assistant/messages/base.rb +4 -0
  10. data/lib/langchain/assistant/messages/google_gemini_message.rb +62 -21
  11. data/lib/langchain/assistant/messages/mistral_ai_message.rb +69 -24
  12. data/lib/langchain/assistant/messages/ollama_message.rb +9 -5
  13. data/lib/langchain/assistant/messages/openai_message.rb +78 -26
  14. data/lib/langchain/assistant.rb +2 -1
  15. data/lib/langchain/llm/anthropic.rb +10 -10
  16. data/lib/langchain/llm/aws_bedrock.rb +75 -120
  17. data/lib/langchain/llm/azure.rb +1 -1
  18. data/lib/langchain/llm/base.rb +1 -1
  19. data/lib/langchain/llm/cohere.rb +8 -8
  20. data/lib/langchain/llm/google_gemini.rb +5 -6
  21. data/lib/langchain/llm/google_vertex_ai.rb +6 -5
  22. data/lib/langchain/llm/hugging_face.rb +4 -4
  23. data/lib/langchain/llm/mistral_ai.rb +4 -4
  24. data/lib/langchain/llm/ollama.rb +10 -8
  25. data/lib/langchain/llm/openai.rb +6 -5
  26. data/lib/langchain/llm/parameters/chat.rb +4 -1
  27. data/lib/langchain/llm/replicate.rb +6 -6
  28. data/lib/langchain/llm/response/ai21_response.rb +20 -0
  29. data/lib/langchain/tool_definition.rb +7 -0
  30. data/lib/langchain/utils/image_wrapper.rb +37 -0
  31. data/lib/langchain/version.rb +1 -1
  32. metadata +4 -2
data/lib/langchain/llm/mistral_ai.rb CHANGED
@@ -8,8 +8,8 @@ module Langchain::LLM
   # llm = Langchain::LLM::MistralAI.new(api_key: ENV["MISTRAL_AI_API_KEY"])
   class MistralAI < Base
     DEFAULTS = {
-      chat_completion_model_name: "mistral-large-latest",
-      embeddings_model_name: "mistral-embed"
+      chat_model: "mistral-large-latest",
+      embedding_model: "mistral-embed"
     }.freeze
 
     attr_reader :defaults
@@ -24,7 +24,7 @@ module Langchain::LLM
 
       @defaults = DEFAULTS.merge(default_options)
      chat_parameters.update(
-        model: {default: @defaults[:chat_completion_model_name]},
+        model: {default: @defaults[:chat_model]},
         n: {default: @defaults[:n]},
         safe_prompt: {},
         temperature: {default: @defaults[:temperature]},
@@ -44,7 +44,7 @@ module Langchain::LLM
 
     def embed(
       text:,
-      model: defaults[:embeddings_model_name],
+      model: defaults[:embedding_model],
       encoding_format: nil
     )
       params = {
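
Note: the rename above changes the public default_options keys, so callers that override them must migrate from chat_completion_model_name / embeddings_model_name to chat_model / embedding_model. A minimal sketch of the new keys (the model names shown are just the shipped defaults):

llm = Langchain::LLM::MistralAI.new(
  api_key: ENV["MISTRAL_AI_API_KEY"],
  default_options: {
    chat_model: "mistral-large-latest", # was chat_completion_model_name
    embedding_model: "mistral-embed"    # was embeddings_model_name
  }
)
llm.embed(text: "hello") # uses defaults[:embedding_model] unless model: is given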
data/lib/langchain/llm/ollama.rb CHANGED
@@ -12,9 +12,10 @@ module Langchain::LLM
 
     DEFAULTS = {
       temperature: 0.0,
-      completion_model_name: "llama3.1",
-      embeddings_model_name: "llama3.1",
-      chat_completion_model_name: "llama3.1"
+      completion_model: "llama3.1",
+      embedding_model: "llama3.1",
+      chat_model: "llama3.1",
+      options: {}
     }.freeze
 
     EMBEDDING_SIZES = {
@@ -41,11 +42,12 @@ module Langchain::LLM
       @api_key = api_key
       @defaults = DEFAULTS.merge(default_options)
       chat_parameters.update(
-        model: {default: @defaults[:chat_completion_model_name]},
+        model: {default: @defaults[:chat_model]},
         temperature: {default: @defaults[:temperature]},
         template: {},
         stream: {default: false},
-        response_format: {default: @defaults[:response_format]}
+        response_format: {default: @defaults[:response_format]},
+        options: {default: @defaults[:options]}
       )
       chat_parameters.remap(response_format: :format)
     end
@@ -55,7 +57,7 @@ module Langchain::LLM
     def default_dimensions
       # since Ollama can run multiple models, look it up or generate an embedding and return the size
       @default_dimensions ||=
-        EMBEDDING_SIZES.fetch(defaults[:embeddings_model_name].to_sym) do
+        EMBEDDING_SIZES.fetch(defaults[:embedding_model].to_sym) do
           embed(text: "test").embedding.size
         end
     end
@@ -77,7 +79,7 @@ module Langchain::LLM
     #
     def complete(
       prompt:,
-      model: defaults[:completion_model_name],
+      model: defaults[:completion_model],
       images: nil,
       format: nil,
       system: nil,
@@ -199,7 +201,7 @@ module Langchain::LLM
     #
     def embed(
       text:,
-      model: defaults[:embeddings_model_name],
+      model: defaults[:embedding_model],
       mirostat: nil,
       mirostat_eta: nil,
       mirostat_tau: nil,
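
Note: besides the same key renames, Ollama now accepts an options chat parameter (declared in the parameters/chat.rb hunk further down) that is passed through to Ollama's API. A hedged sketch; num_ctx is one example of an Ollama runtime option, and the model name is illustrative:

llm = Langchain::LLM::Ollama.new(
  default_options: {chat_model: "llama3.1", options: {num_ctx: 4096}}
)
# A per-call value overrides the configured default:
llm.chat(messages: [{role: "user", content: "Hi"}], options: {num_ctx: 8192})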
data/lib/langchain/llm/openai.rb CHANGED
@@ -16,8 +16,8 @@ module Langchain::LLM
     DEFAULTS = {
       n: 1,
       temperature: 0.0,
-      chat_completion_model_name: "gpt-4o-mini",
-      embeddings_model_name: "text-embedding-3-small"
+      chat_model: "gpt-4o-mini",
+      embedding_model: "text-embedding-3-small"
     }.freeze
 
     EMBEDDING_SIZES = {
@@ -41,7 +41,7 @@ module Langchain::LLM
 
       @defaults = DEFAULTS.merge(default_options)
       chat_parameters.update(
-        model: {default: @defaults[:chat_completion_model_name]},
+        model: {default: @defaults[:chat_model]},
         logprobs: {},
         top_logprobs: {},
         n: {default: @defaults[:n]},
@@ -61,7 +61,7 @@ module Langchain::LLM
     # @return [Langchain::LLM::OpenAIResponse] Response object
     def embed(
       text:,
-      model: defaults[:embeddings_model_name],
+      model: defaults[:embedding_model],
       encoding_format: nil,
       user: nil,
       dimensions: @defaults[:dimensions]
@@ -109,6 +109,7 @@ module Langchain::LLM
       messages = [{role: "user", content: prompt}]
       chat(messages: messages, **params)
     end
+
     # rubocop:enable Style/ArgumentsForwarding
 
     # Generate a chat completion for given messages.
@@ -159,7 +160,7 @@ module Langchain::LLM
     end
 
     def default_dimensions
-      @defaults[:dimensions] || EMBEDDING_SIZES.fetch(defaults[:embeddings_model_name])
+      @defaults[:dimensions] || EMBEDDING_SIZES.fetch(defaults[:embedding_model])
     end
 
     private
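
Note: the same rename applies here, and default_dimensions now looks up EMBEDDING_SIZES by the embedding_model key. A small sketch under those assumptions:

llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
llm.default_dimensions # => 1536, the EMBEDDING_SIZES entry for text-embedding-3-small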
data/lib/langchain/llm/parameters/chat.rb CHANGED
@@ -37,7 +37,10 @@ module Langchain::LLM::Parameters
       parallel_tool_calls: {},
 
       # Additional optional parameters
-      logit_bias: {}
+      logit_bias: {},
+
+      # Additional llm options. Ollama only.
+      options: {}
     }
 
     def initialize(parameters: {})
data/lib/langchain/llm/replicate.rb CHANGED
@@ -14,8 +14,8 @@ module Langchain::LLM
       # TODO: Figure out how to send the temperature to the API
       temperature: 0.01, # Minimum accepted value
       # TODO: Design the interface to pass and use different models
-      completion_model_name: "replicate/vicuna-13b",
-      embeddings_model_name: "creatorrr/all-mpnet-base-v2",
+      completion_model: "replicate/vicuna-13b",
+      embedding_model: "creatorrr/all-mpnet-base-v2",
       dimensions: 384
     }.freeze
 
@@ -49,7 +49,7 @@ module Langchain::LLM
         sleep(0.1)
       end
 
-      Langchain::LLM::ReplicateResponse.new(response, model: @defaults[:embeddings_model_name])
+      Langchain::LLM::ReplicateResponse.new(response, model: @defaults[:embedding_model])
     end
 
     #
@@ -66,7 +66,7 @@ module Langchain::LLM
         sleep(0.1)
       end
 
-      Langchain::LLM::ReplicateResponse.new(response, model: @defaults[:completion_model_name])
+      Langchain::LLM::ReplicateResponse.new(response, model: @defaults[:completion_model])
     end
 
     #
@@ -94,11 +94,11 @@ module Langchain::LLM
     private
 
     def completion_model
-      @completion_model ||= client.retrieve_model(@defaults[:completion_model_name]).latest_version
+      @completion_model ||= client.retrieve_model(@defaults[:completion_model]).latest_version
     end
 
     def embeddings_model
-      @embeddings_model ||= client.retrieve_model(@defaults[:embeddings_model_name]).latest_version
+      @embeddings_model ||= client.retrieve_model(@defaults[:embedding_model]).latest_version
     end
   end
 end
data/lib/langchain/llm/response/ai21_response.rb CHANGED
@@ -9,5 +9,25 @@ module Langchain::LLM
     def completion
       completions.dig(0, :data, :text)
     end
+
+    def chat_completion
+      raw_response.dig(:choices, 0, :message, :content)
+    end
+
+    def prompt_tokens
+      raw_response.dig(:usage, :prompt_tokens).to_i
+    end
+
+    def completion_tokens
+      raw_response.dig(:usage, :completion_tokens).to_i
+    end
+
+    def total_tokens
+      raw_response.dig(:usage, :total_tokens).to_i
+    end
+
+    def role
+      raw_response.dig(:choices, 0, :message, :role)
+    end
   end
 end
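
Note: these additions give AI21Response the same chat accessors the other response wrappers expose. A hedged sketch against a hypothetical raw response hash shaped the way the digs above expect (the wrapper is assumed to take the raw hash as its first argument, like the other response classes):

raw = {
  choices: [{message: {role: "assistant", content: "Hello!"}}],
  usage: {prompt_tokens: 5, completion_tokens: 2, total_tokens: 7}
}
response = Langchain::LLM::AI21Response.new(raw)
response.chat_completion # => "Hello!"
response.role            # => "assistant"
response.total_tokens    # => 7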
data/lib/langchain/tool_definition.rb CHANGED
@@ -103,6 +103,13 @@ module Langchain::ToolDefinition
     # @return [String] JSON string of schemas in Anthropic format
     def to_anthropic_format
       @schemas.values.map do |schema|
+        # Adds a default input_schema if no parameters are present
+        schema[:function][:parameters] ||= {
+          type: "object",
+          properties: {},
+          required: []
+        }
+
         schema[:function].transform_keys(parameters: :input_schema)
       end
     end
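
Note: this fixes tools defined without any parameters, since Anthropic's API requires an input_schema on every tool. A sketch of a zero-argument tool (the class, method, and generated function name are illustrative):

class Clock
  extend Langchain::ToolDefinition

  # No properties declared, so the schema has no :parameters key
  define_function :now, description: "Returns the current time"

  def now
    Time.now.to_s
  end
end

Clock.function_schemas.to_anthropic_format
# => [{name: "clock__now", description: "Returns the current time",
#      input_schema: {type: "object", properties: {}, required: []}}]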
data/lib/langchain/utils/image_wrapper.rb ADDED
@@ -0,0 +1,37 @@
+# frozen_string_literal: true
+
+require "open-uri"
+require "base64"
+
+module Langchain
+  module Utils
+    class ImageWrapper
+      attr_reader :image_url
+
+      def initialize(image_url)
+        @image_url = image_url
+      end
+
+      def base64
+        @base64 ||= begin
+          image_data = open_image.read
+          Base64.strict_encode64(image_data)
+        end
+      end
+
+      def mime_type
+        # TODO: Make it work with local files
+        open_image.meta["content-type"]
+      end
+
+      private
+
+      def open_image
+        # TODO: Make it work with local files
+        uri = URI.parse(image_url)
+        raise URI::InvalidURIError, "Invalid URL scheme" unless %w[http https].include?(uri.scheme)
+        @open_image ||= URI.open(image_url) # rubocop:disable Security/Open
+      end
+    end
+  end
+end
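
Note: ImageWrapper fetches a remote image once and exposes it as Base64 plus a MIME type, for inlining images into multimodal messages. A minimal usage sketch (the URL is illustrative):

image = Langchain::Utils::ImageWrapper.new("https://example.com/photo.png")
image.mime_type # e.g. "image/png", read from the HTTP Content-Type header
image.base64    # strict Base64 of the downloaded bytes; the download is memoized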
data/lib/langchain/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module Langchain
-  VERSION = "0.18.0"
+  VERSION = "0.19.1"
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: langchainrb
 version: !ruby/object:Gem::Version
-  version: 0.18.0
+  version: 0.19.1
 platform: ruby
 authors:
 - Andrei Bondarev
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-10-12 00:00:00.000000000 Z
+date: 2024-11-21 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: baran
@@ -640,6 +640,7 @@ files:
 - lib/langchain/assistant.rb
 - lib/langchain/assistant/llm/adapter.rb
 - lib/langchain/assistant/llm/adapters/anthropic.rb
+- lib/langchain/assistant/llm/adapters/aws_bedrock_anthropic.rb
 - lib/langchain/assistant/llm/adapters/base.rb
 - lib/langchain/assistant/llm/adapters/google_gemini.rb
 - lib/langchain/assistant/llm/adapters/mistral_ai.rb
@@ -736,6 +737,7 @@ files:
 - lib/langchain/tool_definition.rb
 - lib/langchain/utils/cosine_similarity.rb
 - lib/langchain/utils/hash_transformer.rb
+- lib/langchain/utils/image_wrapper.rb
 - lib/langchain/utils/to_boolean.rb
 - lib/langchain/vectorsearch/base.rb
 - lib/langchain/vectorsearch/chroma.rb