langchainrb 0.11.3 → 0.11.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: c678ae75bc25b0501223f5b6ffd396a9159af4d0ddd87ddb1657429ed2ba24ce
- data.tar.gz: df50ef0a6d9c1a3100153a06084556cac983069d1a38739bd6606f39f63bd332
+ metadata.gz: 22781a12fbb032cfd70fe45c9a0b96621b2d47f4d6ef912547b327790d6dfcaa
+ data.tar.gz: c8ab04c3bf520e7af0d298f4cd15e9c9df149fd7618d8998c53ec8b684ddddea
  SHA512:
- metadata.gz: 3ec9f92f4c6221184b7a0a2c118caa6a56e7bc8505a83d5b5acb4daeb769ff90d8822b43c28f57adc06435dd2df5577268721345c4061d3dad6ecb919be18efc
- data.tar.gz: 53d54b0c6a82082438f2e2f1ca70d097a9b916bc283b72e52ce466b6f012c9624cf094586b17e93eaa49a796bf9911051d3f4b494b9ecc93c3ac6ee6cdc7e8fe
+ metadata.gz: 65c2068d4e0289b4aa081ab659b4470a312c387a138b4a367cf5717a9d232b39ab44f7dcbfddcb382e0f8ea1b735553c630e5106d49884e6d91c6cee667fcd1d
+ data.tar.gz: a2a15c08e593903e6fdf67cc2ee1e3a5188c9d2aaae6a2c3dad991dbdb1b13c75cd76f5a557bbbc2581b30f87860d5719b75bb65ecb4c1c83e05b8e7aedc5714
data/CHANGELOG.md CHANGED
@@ -1,9 +1,10 @@
  ## [Unreleased]
- - New `Langchain::Processors::Pptx` to parse .pptx files
- - New `Langchain::LLM::Anthropic#chat()` support
- - Misc fixes

- ## [0.11.3]
+ ## [0.11.4] - 2024-04-19
+ - New `Langchain::LLM::AwsBedrock#chat()` to wrap Bedrock Claude requests
+ - New `Langchain::LLM::OllamaResponse#total_tokens()` method
+
+ ## [0.11.3] - 2024-04-16
  - New `Langchain::Processors::Pptx` to parse .pptx files
  - New `Langchain::LLM::Anthropic#chat()` support
  - Misc fixes
data/README.md CHANGED
@@ -60,7 +60,7 @@ Langchain.rb wraps supported LLMs in a unified interface allowing you to easily
  | [OpenAI](https://openai.com/?utm_source=langchainrb&utm_medium=github) | ✅ | ✅ | ✅ | ❌ | Including Azure OpenAI |
  | [AI21](https://ai21.com/?utm_source=langchainrb&utm_medium=github) | ❌ | ✅ | ❌ | ✅ | |
  | [Anthropic](https://anthropic.com/?utm_source=langchainrb&utm_medium=github) | ❌ | ✅ | ✅ | ❌ | |
- | [AWS Bedrock](https://aws.amazon.com/bedrock?utm_source=langchainrb&utm_medium=github) | ✅ | ✅ | | ❌ | Provides AWS, Cohere, AI21, Antropic and Stability AI models |
+ | [AWS Bedrock](https://aws.amazon.com/bedrock?utm_source=langchainrb&utm_medium=github) | ✅ | ✅ | ✅ | ❌ | Provides AWS, Cohere, AI21, Antropic and Stability AI models |
  | [Cohere](https://cohere.com/?utm_source=langchainrb&utm_medium=github) | ✅ | ✅ | ✅ | ✅ | |
  | [GooglePalm](https://ai.google/discover/palm2?utm_source=langchainrb&utm_medium=github) | ✅ | ✅ | ✅ | ✅ | |
  | [Google Vertex AI](https://cloud.google.com/vertex-ai?utm_source=langchainrb&utm_medium=github) | ✅ | ✅ | ❌ | ✅ | |
data/lib/langchain/llm/aws_bedrock.rb CHANGED
@@ -46,7 +46,10 @@ module Langchain::LLM
  }
  }.freeze

+ attr_reader :client, :defaults
+
  SUPPORTED_COMPLETION_PROVIDERS = %i[anthropic cohere ai21].freeze
+ SUPPORTED_CHAT_COMPLETION_PROVIDERS = %i[anthropic].freeze
  SUPPORTED_EMBEDDING_PROVIDERS = %i[amazon].freeze

  def initialize(completion_model: DEFAULTS[:completion_model_name], embedding_model: DEFAULTS[:embedding_model_name], aws_client_options: {}, default_options: {})
@@ -91,6 +94,8 @@ module Langchain::LLM
  def complete(prompt:, **params)
    raise "Completion provider #{completion_provider} is not supported." unless SUPPORTED_COMPLETION_PROVIDERS.include?(completion_provider)

+   raise "Model #{@defaults[:completion_model_name]} only supports #chat." if @defaults[:completion_model_name].include?("claude-3")
+
    parameters = compose_parameters params

    parameters[:prompt] = wrap_prompt prompt
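
The new guard in `#complete` redirects Claude 3 models to the chat API. A hedged sketch of the resulting behavior (the model ID below is an assumption, not taken from the diff):

```ruby
# Illustrative only: with a claude-3 completion model configured,
# #complete now raises and callers must use #chat instead.
llm = Langchain::LLM::AwsBedrock.new(
  completion_model: "anthropic.claude-3-sonnet-20240229-v1:0" # assumed model ID
)
llm.complete(prompt: "Hello")
# => RuntimeError: Model anthropic.claude-3-sonnet-20240229-v1:0 only supports #chat.
```
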
@@ -105,6 +110,53 @@ module Langchain::LLM
    parse_response response
  end

+ # Generate a chat completion for a given prompt
+ # Currently only configured to work with the Anthropic provider and
+ # the claude-3 model family
+ # @param messages [Array] The messages to generate a completion for
+ # @param system [String] The system prompt to provide instructions
+ # @param model [String] The model to use for completion defaults to @defaults[:chat_completion_model_name]
+ # @param max_tokens [Integer] The maximum number of tokens to generate
+ # @param stop_sequences [Array] The stop sequences to use for completion
+ # @param temperature [Float] The temperature to use for completion
+ # @param top_p [Float] The top p to use for completion
+ # @param top_k [Integer] The top k to use for completion
+ # @return [Langchain::LLM::AnthropicMessagesResponse] Response object
+ def chat(
+   messages: [],
+   system: nil,
+   model: defaults[:completion_model_name],
+   max_tokens: defaults[:max_tokens_to_sample],
+   stop_sequences: nil,
+   temperature: nil,
+   top_p: nil,
+   top_k: nil
+ )
+   raise ArgumentError.new("messages argument is required") if messages.empty?
+
+   raise "Model #{model} does not support chat completions." unless Langchain::LLM::AwsBedrock::SUPPORTED_CHAT_COMPLETION_PROVIDERS.include?(completion_provider)
+
+   inference_parameters = {
+     messages: messages,
+     max_tokens: max_tokens,
+     anthropic_version: @defaults[:anthropic_version]
+   }
+   inference_parameters[:system] = system if system
+   inference_parameters[:stop_sequences] = stop_sequences if stop_sequences
+   inference_parameters[:temperature] = temperature if temperature
+   inference_parameters[:top_p] = top_p if top_p
+   inference_parameters[:top_k] = top_k if top_k
+
+   response = client.invoke_model({
+     model_id: model,
+     body: inference_parameters.to_json,
+     content_type: "application/json",
+     accept: "application/json"
+   })
+
+   parse_response response
+ end
+
  private

  def completion_provider
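
Taken together, the hunks above add a Bedrock-backed Anthropic messages call. A minimal usage sketch, assuming a Claude 3 model ID enabled in your AWS account and configured AWS credentials; the model string and the `chat_completion` accessor are assumptions, inferred from the gem's other response classes:

```ruby
require "langchain"

llm = Langchain::LLM::AwsBedrock.new(
  completion_model: "anthropic.claude-3-sonnet-20240229-v1:0" # assumed model ID
)

# messages follow the Anthropic Messages API shape and are serialized
# into the invoke_model request body shown in the diff above
response = llm.chat(
  messages: [{role: "user", content: "Name three uses of Ruby."}],
  system: "Answer briefly.",
  max_tokens: 256
)

puts response.chat_completion # accessor assumed; wraps the parsed response body
```
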
data/lib/langchain/llm/response/ollama_response.rb CHANGED
@@ -44,5 +44,9 @@ module Langchain::LLM
  def completion_tokens
    raw_response.dig("eval_count")
  end
+
+ def total_tokens
+   prompt_tokens + completion_tokens
+ end
  end
  end
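
`#total_tokens` simply sums the existing prompt and completion counters. A small sketch, assuming the constructor takes a raw Ollama response hash and that `#prompt_tokens` reads Ollama's standard `prompt_eval_count` field (only `eval_count` is confirmed by the diff):

```ruby
require "langchain"

raw = {"prompt_eval_count" => 12, "eval_count" => 34} # illustrative counts
response = Langchain::LLM::OllamaResponse.new(raw, model: "llama2")

response.total_tokens # => 46, i.e. prompt_tokens + completion_tokens
```
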
data/lib/langchain/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module Langchain
-   VERSION = "0.11.3"
+   VERSION = "0.11.4"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: langchainrb
  version: !ruby/object:Gem::Version
-   version: 0.11.3
+   version: 0.11.4
  platform: ruby
  authors:
  - Andrei Bondarev
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2024-04-17 00:00:00.000000000 Z
+ date: 2024-04-19 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: activesupport