langchainrb 0.9.5 → 0.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +3 -0
- data/lib/langchain/vectorsearch/base.rb +5 -0
- data/lib/langchain/vectorsearch/elasticsearch.rb +11 -0
- data/lib/langchain/version.rb +1 -1
- metadata +2 -8
- data/lib/langchain/conversation/context.rb +0 -8
- data/lib/langchain/conversation/memory.rb +0 -83
- data/lib/langchain/conversation/message.rb +0 -50
- data/lib/langchain/conversation/prompt.rb +0 -8
- data/lib/langchain/conversation/response.rb +0 -8
- data/lib/langchain/conversation.rb +0 -82
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 81783847d8152dbcff9e1ea0b51afee5619fb863b1cf3bf2f66912a52e96e797
+  data.tar.gz: 877bf77b04771a9a898d478967a0df5ef8ab2d62371f7e71c67e76daf39cffb9
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 8a3c6e98399f8d76d10c0ccdd6af2650c77962662490a9541daf1bcb76f693c19637b8bdd140800dd920f116753178bdda7e7a2dd0649c9fdafaad837df3a3db
+  data.tar.gz: 8f0a18c9c3c25c2bea9041e5884aa2a29e79549fbbd73255f27bc2a6053e4524b4f3d0a4896d002291cf6f2b74c277462aa85d3933a2a3c4ab7499901d9aa89b
data/CHANGELOG.md
CHANGED
data/lib/langchain/vectorsearch/base.rb
CHANGED
@@ -124,6 +124,11 @@ module Langchain::Vectorsearch
       raise NotImplementedError, "#{self.class.name} does not support updating texts"
     end
 
+    # Method supported by Vectorsearch DB to delete a list of texts from the index
+    def remove_texts(...)
+      raise NotImplementedError, "#{self.class.name} does not support deleting texts"
+    end
+
     # Method supported by Vectorsearch DB to search for similar texts in the index
     def similarity_search(...)
       raise NotImplementedError, "#{self.class.name} does not support similarity search"
data/lib/langchain/vectorsearch/elasticsearch.rb
CHANGED
@@ -75,6 +75,17 @@ module Langchain::Vectorsearch
       es_client.bulk(body: body)
     end
 
+    # Remove a list of texts from the index
+    # @param ids [Array<Integer>] The list of ids to delete
+    # @return [Elasticsearch::Response] from the Elasticsearch server
+    def remove_texts(ids: [])
+      body = ids.map do |id|
+        {delete: {_index: index_name, _id: id}}
+      end
+
+      es_client.bulk(body: body)
+    end
+
     # Create the index with the default schema
     # @return [Elasticsearch::Response] Index creation
     def create_default_schema
data/lib/langchain/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: langchainrb
 version: !ruby/object:Gem::Version
-  version: 0.9.5
+  version: 0.10.0
 platform: ruby
 authors:
 - Andrei Bondarev
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-03-
+date: 2024-03-17 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activesupport
@@ -698,12 +698,6 @@ files:
 - lib/langchain/chunker/sentence.rb
 - lib/langchain/chunker/text.rb
 - lib/langchain/contextual_logger.rb
-- lib/langchain/conversation.rb
-- lib/langchain/conversation/context.rb
-- lib/langchain/conversation/memory.rb
-- lib/langchain/conversation/message.rb
-- lib/langchain/conversation/prompt.rb
-- lib/langchain/conversation/response.rb
 - lib/langchain/data.rb
 - lib/langchain/dependency_helper.rb
 - lib/langchain/evals/ragas/answer_relevance.rb
data/lib/langchain/conversation/memory.rb
DELETED
@@ -1,83 +0,0 @@
-# frozen_string_literal: true
-
-module Langchain
-  class Conversation
-    class Memory
-      attr_reader :messages
-
-      # The least number of tokens we want to be under the limit by
-      TOKEN_LEEWAY = 20
-
-      def initialize(llm:, messages: [], **options)
-        warn "[DEPRECATION] `Langchain::Conversation::Memory` is deprecated. Please use `Langchain::Assistant` instead."
-
-        @llm = llm
-        @context = nil
-        @summary = nil
-        @messages = messages
-        @strategy = options.delete(:strategy) || :truncate
-        @options = options
-      end
-
-      def set_context(message)
-        @context = message
-      end
-
-      def append_message(message)
-        @messages.append(message)
-      end
-
-      def reduce_messages(exception)
-        case @strategy
-        when :truncate
-          truncate_messages(exception)
-        when :summarize
-          summarize_messages
-        else
-          raise "Unknown strategy: #{@options[:strategy]}"
-        end
-      end
-
-      def context
-        return if @context.nil? && @summary.nil?
-
-        Context.new([@context, @summary].compact.join("\n"))
-      end
-
-      private
-
-      def truncate_messages(exception)
-        raise exception if @messages.size == 1
-
-        token_overflow = exception.token_overflow
-
-        @messages = @messages.drop_while do |message|
-          proceed = token_overflow > -TOKEN_LEEWAY
-          token_overflow -= token_length(message.to_json, model_name, llm: @llm)
-
-          proceed
-        end
-      end
-
-      def summarize_messages
-        history = [@summary, @messages.to_json].compact.join("\n")
-        partitions = [history[0, history.size / 2], history[history.size / 2, history.size]]
-
-        @summary = partitions.map { |messages| @llm.summarize(text: messages.to_json) }.join("\n")
-
-        @messages = [@messages.last]
-      end
-
-      def partition_messages
-      end
-
-      def model_name
-        @llm.class::DEFAULTS[:chat_completion_model_name]
-      end
-
-      def token_length(content, model_name, options)
-        @llm.class::LENGTH_VALIDATOR.token_length(content, model_name, options)
-      end
-    end
-  end
-end
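A note on the removed truncation strategy: `truncate_messages` pays down the token overflow reported by the raised exception, dropping the oldest messages first and allowing a 20-token leeway. A self-contained sketch of the same drop_while pattern, with word counts standing in for real token counts:

    # Illustration of the removed truncation loop; word counts fake token counts.
    TOKEN_LEEWAY = 20
    messages = ["old " * 50, "mid " * 30, "new " * 10]
    token_overflow = 60 # pretend the last request was 60 tokens over the limit

    messages = messages.drop_while do |message|
      proceed = token_overflow > -TOKEN_LEEWAY # keep dropping until paid off
      token_overflow -= message.split.size     # stand-in for the real token_length call
      proceed
    end

    messages.size # => 1; only the newest message survives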
data/lib/langchain/conversation/message.rb
DELETED
@@ -1,50 +0,0 @@
-# frozen_string_literal: true
-
-module Langchain
-  class Conversation
-    class Message
-      attr_reader :content
-
-      ROLE_MAPPING = {
-        context: "system",
-        prompt: "user",
-        response: "assistant"
-      }
-
-      def initialize(content)
-        warn "[DEPRECATION] `Langchain::Conversation::*` is deprecated. Please use `Langchain::Assistant` and `Langchain::Messages` classes instead."
-
-        @content = content
-      end
-
-      def role
-        ROLE_MAPPING[type]
-      end
-
-      def to_s
-        content
-      end
-
-      def to_h
-        {
-          role: role,
-          content: content
-        }
-      end
-
-      def ==(other)
-        to_json == other.to_json
-      end
-
-      def to_json(options = {})
-        to_h.to_json
-      end
-
-      private
-
-      def type
-        self.class.to_s.split("::").last.downcase.to_sym
-      end
-    end
-  end
-end
data/lib/langchain/conversation.rb
DELETED
@@ -1,82 +0,0 @@
-# frozen_string_literal: true
-
-module Langchain
-  #
-  # A high-level API for running a conversation with an LLM.
-  # Currently supports: OpenAI and Google PaLM LLMs.
-  #
-  # Usage:
-  #     llm = Langchain::LLM::OpenAI.new(api_key: "YOUR_API_KEY")
-  #     chat = Langchain::Conversation.new(llm: llm)
-  #     chat.set_context("You are a chatbot from the future")
-  #     chat.message("Tell me about future technologies")
-  #
-  # To stream the chat response:
-  #     chat = Langchain::Conversation.new(llm: llm) do |chunk|
-  #       print(chunk)
-  #     end
-  #
-  class Conversation
-    attr_reader :options
-
-    # Intialize Conversation with a LLM
-    #
-    # @param llm [Object] The LLM to use for the conversation
-    # @param options [Hash] Options to pass to the LLM, like temperature, top_k, etc.
-    # @return [Langchain::Conversation] The Langchain::Conversation instance
-    def initialize(llm:, **options, &block)
-      warn "[DEPRECATION] `Langchain::Conversation` is deprecated. Please use `Langchain::Assistant` instead."
-
-      @llm = llm
-      @context = nil
-      @memory = ::Langchain::Conversation::Memory.new(
-        llm: llm,
-        messages: options.delete(:messages) || [],
-        strategy: options.delete(:memory_strategy)
-      )
-      @options = options
-      @block = block
-    end
-
-    # Set the context of the conversation. Usually used to set the model's persona.
-    # @param message [String] The context of the conversation
-    def set_context(message)
-      @memory.set_context ::Langchain::Conversation::Context.new(message)
-    end
-
-    # Message the model with a prompt and return the response.
-    # @param message [String] The prompt to message the model with
-    # @return [Response] The response from the model
-    def message(message)
-      @memory.append_message ::Langchain::Conversation::Prompt.new(message)
-      ai_message = ::Langchain::Conversation::Response.new(llm_response.chat_completion)
-      @memory.append_message(ai_message)
-      ai_message
-    end
-
-    # Messages from conversation memory
-    # @return [Array<Prompt|Response>] The messages from the conversation memory
-    def messages
-      @memory.messages
-    end
-
-    # Context from conversation memory
-    # @return [Context] Context from conversation memory
-    def context
-      @memory.context
-    end
-
-    private
-
-    def llm_response
-      message_history = messages.map(&:to_h)
-      # Prepend the system message as context as the first message
-      message_history.prepend({role: "system", content: @memory.context.to_s}) if @memory.context
-
-      @llm.chat(messages: message_history, **@options, &@block)
-    rescue Langchain::Utils::TokenLength::TokenLimitExceeded => exception
-      @memory.reduce_messages(exception)
-      retry
-    end
-  end
-end
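The deprecation warnings in the deleted files point users at Langchain::Assistant. A hedged migration sketch from the removed Conversation API: the Assistant and Thread keyword names below follow the 0.10-era README and are assumptions — the Assistant API moved quickly, so verify against the docs shipped with your version.

    # Hedged sketch: Assistant/Thread API shape assumed from the 0.10-era README.
    llm = Langchain::LLM::OpenAI.new(api_key: "YOUR_API_KEY")

    assistant = Langchain::Assistant.new(
      llm: llm,
      thread: Langchain::Thread.new,
      instructions: "You are a chatbot from the future" # replaces set_context
    )

    assistant.add_message(content: "Tell me about future technologies") # replaces #message
    assistant.run

    assistant.thread.messages # replaces Conversation#messages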