langchainrb 0.9.5 → 0.10.2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 833d4dafdf55e45852261e1c86b8121fd3ed1b61766a7fb121589e6549b255e0
-   data.tar.gz: d3834e7a5d15cf1ddd45bfc2db69b73afb5cf60b6b43004a398a688b5d5932e1
+   metadata.gz: 19b23746583868d1a5eca30d5e8f30bf548cfc253d8cf20fa13c27bb4e03b967
+   data.tar.gz: 6de6bea0a348b812d09745af6f14c03def1f8d7a4fb7a246ab328de4992b9e8e
  SHA512:
-   metadata.gz: 98dbc07b39f956d7425c562451d9eced8162cd7d7e181ac6188d029090275f5485376db3afeb826e730c168f6d64a7cd6af90503974f8b2696b1555f3d18b589
-   data.tar.gz: 3e641d27e3ccdedfa363c7bfecb7f6a1293c1f866421db3ac5e74dcd4615934a132345f9c7912fec0200d2fe626747faaefa592592066e336d33c4db5d3fd050
+   metadata.gz: 12e39b5c0c523d7ea798f4fc0729fc53f0e0754db43261ec9c464369a4a01fea18ef7c30bee9d26b45a08add746cd45b0550f1a741dda0f84ae4cd72be9481d1
+   data.tar.gz: abf235f4d1dffd76d4a73a45a8e3e3af81ed358eadb43189ccef6cd8d3ff7739c8e1fcb38287c786dd56de6edf8944efc16270936cd6d4e4763e2ec4f7c84eea
data/CHANGELOG.md CHANGED
@@ -1,5 +1,15 @@
  ## [Unreleased]

+ ## [0.10.2]
+ - New Langchain::LLM::MistralAI
+ - Drop Ruby 3.0 support
+ - Fixes Zeitwerk::NameError
+
+ ## [0.10.1] - GEM VERSION YANKED
+
+ ## [0.10.0]
+ - Delete `Langchain::Conversation` class
+
  ## [0.9.5]
  - Now using OpenAI's "text-embedding-3-small" model to generate embeddings
  - Added `remove_texts(ids:)` method to Qdrant and Chroma
data/lib/langchain/llm/mistral_ai.rb ADDED
@@ -0,0 +1,68 @@
+ # frozen_string_literal: true
+
+ module Langchain::LLM
+   # Gem requirements:
+   #   gem "mistral-ai"
+   #
+   # Usage:
+   #   llm = Langchain::LLM::MistralAI.new(api_key: ENV["MISTRAL_AI_API_KEY"])
+   class MistralAI < Base
+     DEFAULTS = {
+       chat_completion_model_name: "mistral-medium",
+       embeddings_model_name: "mistral-embed"
+     }.freeze
+
+     attr_reader :defaults
+
+     def initialize(api_key:, default_options: {})
+       depends_on "mistral-ai"
+
+       @client = Mistral.new(
+         credentials: {api_key: api_key},
+         options: {server_sent_events: true}
+       )
+
+       @defaults = DEFAULTS.merge(default_options)
+     end
+
+     def chat(
+       messages:,
+       model: defaults[:chat_completion_model_name],
+       temperature: nil,
+       top_p: nil,
+       max_tokens: nil,
+       safe_prompt: nil,
+       random_seed: nil
+     )
+       params = {
+         messages: messages,
+         model: model
+       }
+       params[:temperature] = temperature if temperature
+       params[:top_p] = top_p if top_p
+       params[:max_tokens] = max_tokens if max_tokens
+       params[:safe_prompt] = safe_prompt if safe_prompt
+       params[:random_seed] = random_seed if random_seed
+
+       response = client.chat_completions(params)
+
+       Langchain::LLM::MistralAIResponse.new(response.to_h)
+     end
+
+     def embed(
+       text:,
+       model: defaults[:embeddings_model_name],
+       encoding_format: nil
+     )
+       params = {
+         input: text,
+         model: model
+       }
+       params[:encoding_format] = encoding_format if encoding_format
+
+       response = client.embeddings(params)
+
+       Langchain::LLM::MistralAIResponse.new(response.to_h)
+     end
+   end
+ end
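For orientation, a minimal usage sketch of the new class, assuming the mistral-ai gem is installed and MISTRAL_AI_API_KEY is set; the prompt text and temperature are illustrative:

  require "langchain"

  llm = Langchain::LLM::MistralAI.new(api_key: ENV["MISTRAL_AI_API_KEY"])

  # chat wraps the raw response hash in a Langchain::LLM::MistralAIResponse
  response = llm.chat(
    messages: [{role: "user", content: "Say hello in one word."}],
    temperature: 0.2
  )
  puts response.chat_completion

  # embed returns the same wrapper; #embedding exposes the vector
  vector = llm.embed(text: "hello world").embedding
  puts vector.size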
data/lib/langchain/llm/response/mistral_ai_response.rb ADDED
@@ -0,0 +1,39 @@
+ # frozen_string_literal: true
+
+ module Langchain::LLM
+   class MistralAIResponse < BaseResponse
+     def model
+       raw_response["model"]
+     end
+
+     def chat_completion
+       raw_response.dig("choices", 0, "message", "content")
+     end
+
+     def role
+       raw_response.dig("choices", 0, "message", "role")
+     end
+
+     def embedding
+       raw_response.dig("data", 0, "embedding")
+     end
+
+     def prompt_tokens
+       raw_response.dig("usage", "prompt_tokens")
+     end
+
+     def total_tokens
+       raw_response.dig("usage", "total_tokens")
+     end
+
+     def completion_tokens
+       raw_response.dig("usage", "completion_tokens")
+     end
+
+     def created_at
+       if raw_response.dig("created_at")
+         Time.at(raw_response.dig("created_at"))
+       end
+     end
+   end
+ end
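The wrapper simply digs into the Mistral-style response hash; a sketch with a hand-built payload in that shape (the values are made up):

  raw = {
    "model" => "mistral-medium",
    "choices" => [{"message" => {"role" => "assistant", "content" => "Hi!"}}],
    "usage" => {"prompt_tokens" => 5, "completion_tokens" => 2, "total_tokens" => 7}
  }

  response = Langchain::LLM::MistralAIResponse.new(raw)
  response.chat_completion # => "Hi!"
  response.role            # => "assistant"
  response.total_tokens    # => 7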
data/lib/langchain/tool/ruby_code_interpreter/ruby_code_interpreter.rb CHANGED
@@ -1,45 +1,41 @@
  # frozen_string_literal: true

- # RubyCodeInterpreter does not work with Ruby 3.3;
- # https://github.com/ukutaht/safe_ruby/issues/4
- if RUBY_VERSION <= "3.2"
-   module Langchain::Tool
-     class RubyCodeInterpreter < Base
-       #
-       # A tool that execute Ruby code in a sandboxed environment.
-       #
-       # Gem requirements:
-       #   gem "safe_ruby", "~> 1.0.4"
-       #
-       # Usage:
-       #   interpreter = Langchain::Tool::RubyCodeInterpreter.new
-       #
-       NAME = "ruby_code_interpreter"
-       ANNOTATIONS_PATH = Langchain.root.join("./langchain/tool/#{NAME}/#{NAME}.json").to_path
+ module Langchain::Tool
+   class RubyCodeInterpreter < Base
+     #
+     # A tool that executes Ruby code in a sandboxed environment.
+     #
+     # Gem requirements:
+     #   gem "safe_ruby", "~> 1.0.4"
+     #
+     # Usage:
+     #   interpreter = Langchain::Tool::RubyCodeInterpreter.new
+     #
+     NAME = "ruby_code_interpreter"
+     ANNOTATIONS_PATH = Langchain.root.join("./langchain/tool/#{NAME}/#{NAME}.json").to_path

-       description <<~DESC
-         A Ruby code interpreter. Use this to execute ruby expressions. Input should be a valid ruby expression. If you want to see the output of the tool, make sure to return a value.
-       DESC
+     description <<~DESC
+       A Ruby code interpreter. Use this to execute ruby expressions. Input should be a valid ruby expression. If you want to see the output of the tool, make sure to return a value.
+     DESC

-       def initialize(timeout: 30)
-         depends_on "safe_ruby"
+     def initialize(timeout: 30)
+       depends_on "safe_ruby"

-         @timeout = timeout
-       end
+       @timeout = timeout
+     end

-       # Executes Ruby code in a sandboxes environment.
-       #
-       # @param input [String] ruby code expression
-       # @return [String] Answer
-       def execute(input:)
-         Langchain.logger.info("Executing \"#{input}\"", for: self.class)
+     # Executes Ruby code in a sandboxed environment.
+     #
+     # @param input [String] ruby code expression
+     # @return [String] Answer
+     def execute(input:)
+       Langchain.logger.info("Executing \"#{input}\"", for: self.class)

-         safe_eval(input)
-       end
+       safe_eval(input)
+     end

-       def safe_eval(code)
-         SafeRuby.eval(code, timeout: @timeout)
-       end
+     def safe_eval(code)
+       SafeRuby.eval(code, timeout: @timeout)
      end
    end
  end
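A hedged sketch of invoking the tool, assuming the safe_ruby gem is installed and a Ruby older than 3.3 (the loader now skips this file on 3.3+, per the data/lib/langchain.rb hunk below):

  interpreter = Langchain::Tool::RubyCodeInterpreter.new(timeout: 10)
  interpreter.execute(input: "2 + 2") # => 4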
data/lib/langchain/vectorsearch/base.rb CHANGED
@@ -124,6 +124,11 @@ module Langchain::Vectorsearch
        raise NotImplementedError, "#{self.class.name} does not support updating texts"
      end

+     # Method supported by Vectorsearch DB to delete a list of texts from the index
+     def remove_texts(...)
+       raise NotImplementedError, "#{self.class.name} does not support deleting texts"
+     end
+
      # Method supported by Vectorsearch DB to search for similar texts in the index
      def similarity_search(...)
        raise NotImplementedError, "#{self.class.name} does not support similarity search"
data/lib/langchain/vectorsearch/elasticsearch.rb CHANGED
@@ -75,6 +75,17 @@ module Langchain::Vectorsearch
        es_client.bulk(body: body)
      end

+     # Remove a list of texts from the index
+     # @param ids [Array<Integer>] The list of ids to delete
+     # @return [Elasticsearch::Response] from the Elasticsearch server
+     def remove_texts(ids: [])
+       body = ids.map do |id|
+         {delete: {_index: index_name, _id: id}}
+       end
+
+       es_client.bulk(body: body)
+     end
+
      # Create the index with the default schema
      # @return [Elasticsearch::Response] Index creation
      def create_default_schema
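A hedged usage sketch of the new method, assuming an Elasticsearch-backed store with documents previously indexed under ids 1 and 2; the constructor arguments are illustrative and `llm` stands for any configured Langchain::LLM instance:

  es = Langchain::Vectorsearch::Elasticsearch.new(
    url: ENV["ELASTICSEARCH_URL"],
    index_name: "langchain",
    llm: llm
  )

  es.remove_texts(ids: [1, 2]) # issues a single bulk request of delete operations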
data/lib/langchain/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module Langchain
-   VERSION = "0.9.5"
+   VERSION = "0.10.2"
  end
data/lib/langchain.rb CHANGED
@@ -16,6 +16,8 @@ loader.inflector.inflect(
    "json" => "JSON",
    "jsonl" => "JSONL",
    "llm" => "LLM",
+   "mistral_ai" => "MistralAI",
+   "mistral_ai_response" => "MistralAIResponse",
    "openai" => "OpenAI",
    "openai_validator" => "OpenAIValidator",
    "openai_response" => "OpenAIResponse",
@@ -32,6 +34,11 @@ loader.collapse("#{__dir__}/langchain/tool/google_search")
  loader.collapse("#{__dir__}/langchain/tool/ruby_code_interpreter")
  loader.collapse("#{__dir__}/langchain/tool/weather")
  loader.collapse("#{__dir__}/langchain/tool/wikipedia")
+
+ # RubyCodeInterpreter does not work with Ruby 3.3;
+ # https://github.com/ukutaht/safe_ruby/issues/4
+ loader.ignore("#{__dir__}/langchain/tool/ruby_code_interpreter") if RUBY_VERSION >= "3.3.0"
+
  loader.setup

  # Langchain.rb is a library for building LLM-backed Ruby applications. It is an abstraction layer that sits on top of the emerging AI-related tools, making it easy for developers to consume and string those services together.
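This hunk is what fixes the Zeitwerk::NameError noted in the changelog: Zeitwerk expects every managed file to define its constant, so a file whose entire body sits inside an `if RUBY_VERSION` guard defines nothing on Ruby 3.3 and trips the loader. Ignoring the file instead keeps the constant map consistent. A minimal sketch of the general idiom, with hypothetical paths:

  require "zeitwerk"

  loader = Zeitwerk::Loader.new
  loader.push_dir("#{__dir__}/lib")
  # Skip files that cannot load on this Ruby, rather than guarding inside them
  loader.ignore("#{__dir__}/lib/my_gem/legacy_tool.rb") if RUBY_VERSION >= "3.3.0"
  loader.setup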
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: langchainrb
  version: !ruby/object:Gem::Version
-   version: 0.9.5
+   version: 0.10.2
  platform: ruby
  authors:
  - Andrei Bondarev
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2024-03-15 00:00:00.000000000 Z
+ date: 2024-03-21 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: activesupport
@@ -472,6 +472,20 @@ dependencies:
      - - "~>"
        - !ruby/object:Gem::Version
          version: '2.8'
+ - !ruby/object:Gem::Dependency
+   name: mistral-ai
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
  - !ruby/object:Gem::Dependency
    name: open-weather-ruby-client
    requirement: !ruby/object:Gem::Requirement
@@ -698,12 +712,6 @@ files:
  - lib/langchain/chunker/sentence.rb
  - lib/langchain/chunker/text.rb
  - lib/langchain/contextual_logger.rb
- - lib/langchain/conversation.rb
- - lib/langchain/conversation/context.rb
- - lib/langchain/conversation/memory.rb
- - lib/langchain/conversation/message.rb
- - lib/langchain/conversation/prompt.rb
- - lib/langchain/conversation/response.rb
  - lib/langchain/data.rb
  - lib/langchain/dependency_helper.rb
  - lib/langchain/evals/ragas/answer_relevance.rb
@@ -724,6 +732,7 @@ files:
  - lib/langchain/llm/google_vertex_ai.rb
  - lib/langchain/llm/hugging_face.rb
  - lib/langchain/llm/llama_cpp.rb
+ - lib/langchain/llm/mistral_ai.rb
  - lib/langchain/llm/ollama.rb
  - lib/langchain/llm/openai.rb
  - lib/langchain/llm/prompts/ollama/summarize_template.yaml
@@ -738,6 +747,7 @@ files:
  - lib/langchain/llm/response/google_vertex_ai_response.rb
  - lib/langchain/llm/response/hugging_face_response.rb
  - lib/langchain/llm/response/llama_cpp_response.rb
+ - lib/langchain/llm/response/mistral_ai_response.rb
  - lib/langchain/llm/response/ollama_response.rb
  - lib/langchain/llm/response/openai_response.rb
  - lib/langchain/llm/response/replicate_response.rb
@@ -812,7 +822,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
-       version: 3.0.0
+       version: 3.1.0
  required_rubygems_version: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
data/lib/langchain/conversation/context.rb DELETED
@@ -1,8 +0,0 @@
- # frozen_string_literal: true
-
- module Langchain
-   class Conversation
-     class Context < Message
-     end
-   end
- end
data/lib/langchain/conversation/memory.rb DELETED
@@ -1,83 +0,0 @@
- # frozen_string_literal: true
-
- module Langchain
-   class Conversation
-     class Memory
-       attr_reader :messages
-
-       # The least number of tokens we want to be under the limit by
-       TOKEN_LEEWAY = 20
-
-       def initialize(llm:, messages: [], **options)
-         warn "[DEPRECATION] `Langchain::Conversation::Memory` is deprecated. Please use `Langchain::Assistant` instead."
-
-         @llm = llm
-         @context = nil
-         @summary = nil
-         @messages = messages
-         @strategy = options.delete(:strategy) || :truncate
-         @options = options
-       end
-
-       def set_context(message)
-         @context = message
-       end
-
-       def append_message(message)
-         @messages.append(message)
-       end
-
-       def reduce_messages(exception)
-         case @strategy
-         when :truncate
-           truncate_messages(exception)
-         when :summarize
-           summarize_messages
-         else
-           raise "Unknown strategy: #{@options[:strategy]}"
-         end
-       end
-
-       def context
-         return if @context.nil? && @summary.nil?
-
-         Context.new([@context, @summary].compact.join("\n"))
-       end
-
-       private
-
-       def truncate_messages(exception)
-         raise exception if @messages.size == 1
-
-         token_overflow = exception.token_overflow
-
-         @messages = @messages.drop_while do |message|
-           proceed = token_overflow > -TOKEN_LEEWAY
-           token_overflow -= token_length(message.to_json, model_name, llm: @llm)
-
-           proceed
-         end
-       end
-
-       def summarize_messages
-         history = [@summary, @messages.to_json].compact.join("\n")
-         partitions = [history[0, history.size / 2], history[history.size / 2, history.size]]
-
-         @summary = partitions.map { |messages| @llm.summarize(text: messages.to_json) }.join("\n")
-
-         @messages = [@messages.last]
-       end
-
-       def partition_messages
-       end
-
-       def model_name
-         @llm.class::DEFAULTS[:chat_completion_model_name]
-       end
-
-       def token_length(content, model_name, options)
-         @llm.class::LENGTH_VALIDATOR.token_length(content, model_name, options)
-       end
-     end
-   end
- end
data/lib/langchain/conversation/message.rb DELETED
@@ -1,50 +0,0 @@
- # frozen_string_literal: true
-
- module Langchain
-   class Conversation
-     class Message
-       attr_reader :content
-
-       ROLE_MAPPING = {
-         context: "system",
-         prompt: "user",
-         response: "assistant"
-       }
-
-       def initialize(content)
-         warn "[DEPRECATION] `Langchain::Conversation::*` is deprecated. Please use `Langchain::Assistant` and `Langchain::Messages` classes instead."
-
-         @content = content
-       end
-
-       def role
-         ROLE_MAPPING[type]
-       end
-
-       def to_s
-         content
-       end
-
-       def to_h
-         {
-           role: role,
-           content: content
-         }
-       end
-
-       def ==(other)
-         to_json == other.to_json
-       end
-
-       def to_json(options = {})
-         to_h.to_json
-       end
-
-       private
-
-       def type
-         self.class.to_s.split("::").last.downcase.to_sym
-       end
-     end
-   end
- end
data/lib/langchain/conversation/prompt.rb DELETED
@@ -1,8 +0,0 @@
- # frozen_string_literal: true
-
- module Langchain
-   class Conversation
-     class Prompt < Message
-     end
-   end
- end
data/lib/langchain/conversation/response.rb DELETED
@@ -1,8 +0,0 @@
- # frozen_string_literal: true
-
- module Langchain
-   class Conversation
-     class Response < Message
-     end
-   end
- end
data/lib/langchain/conversation.rb DELETED
@@ -1,82 +0,0 @@
- # frozen_string_literal: true
-
- module Langchain
-   #
-   # A high-level API for running a conversation with an LLM.
-   # Currently supports: OpenAI and Google PaLM LLMs.
-   #
-   # Usage:
-   #   llm = Langchain::LLM::OpenAI.new(api_key: "YOUR_API_KEY")
-   #   chat = Langchain::Conversation.new(llm: llm)
-   #   chat.set_context("You are a chatbot from the future")
-   #   chat.message("Tell me about future technologies")
-   #
-   # To stream the chat response:
-   #   chat = Langchain::Conversation.new(llm: llm) do |chunk|
-   #     print(chunk)
-   #   end
-   #
-   class Conversation
-     attr_reader :options
-
-     # Initialize Conversation with an LLM
-     #
-     # @param llm [Object] The LLM to use for the conversation
-     # @param options [Hash] Options to pass to the LLM, like temperature, top_k, etc.
-     # @return [Langchain::Conversation] The Langchain::Conversation instance
-     def initialize(llm:, **options, &block)
-       warn "[DEPRECATION] `Langchain::Conversation` is deprecated. Please use `Langchain::Assistant` instead."
-
-       @llm = llm
-       @context = nil
-       @memory = ::Langchain::Conversation::Memory.new(
-         llm: llm,
-         messages: options.delete(:messages) || [],
-         strategy: options.delete(:memory_strategy)
-       )
-       @options = options
-       @block = block
-     end
-
-     # Set the context of the conversation. Usually used to set the model's persona.
-     # @param message [String] The context of the conversation
-     def set_context(message)
-       @memory.set_context ::Langchain::Conversation::Context.new(message)
-     end
-
-     # Message the model with a prompt and return the response.
-     # @param message [String] The prompt to message the model with
-     # @return [Response] The response from the model
-     def message(message)
-       @memory.append_message ::Langchain::Conversation::Prompt.new(message)
-       ai_message = ::Langchain::Conversation::Response.new(llm_response.chat_completion)
-       @memory.append_message(ai_message)
-       ai_message
-     end
-
-     # Messages from conversation memory
-     # @return [Array<Prompt|Response>] The messages from the conversation memory
-     def messages
-       @memory.messages
-     end
-
-     # Context from conversation memory
-     # @return [Context] Context from conversation memory
-     def context
-       @memory.context
-     end
-
-     private
-
-     def llm_response
-       message_history = messages.map(&:to_h)
-       # Prepend the system message as context as the first message
-       message_history.prepend({role: "system", content: @memory.context.to_s}) if @memory.context
-
-       @llm.chat(messages: message_history, **@options, &@block)
-     rescue Langchain::Utils::TokenLength::TokenLimitExceeded => exception
-       @memory.reduce_messages(exception)
-       retry
-     end
-   end
- end
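The deprecation warnings above point at Langchain::Assistant. A hedged migration sketch for former Conversation users; the exact Assistant signature varies between versions, and the thread handling here is an assumption about the 0.10.x-era API:

  llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])

  assistant = Langchain::Assistant.new(
    llm: llm,
    thread: Langchain::Thread.new,
    instructions: "You are a chatbot from the future" # replaces set_context
  )

  assistant.add_message(content: "Tell me about future technologies")
  assistant.run
  assistant.thread.messages.last.content # the assistant's reply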