langchainrb 0.6.16 → 0.6.17

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 36e0bec4ad6abfd9077c9e7f2d6166ba99acb7dc3859749ee6facfb9409e6379
- data.tar.gz: 6bd8d3de4f1d31b718381fcef1c21a8b417b2bd8483d7fdc2610cfda3b60a50e
+ metadata.gz: 3b9bca59bfb5909f6ac24ebf6dba6074f5faf3d2cdadab1a3b3a8a0f75f98adc
+ data.tar.gz: a202726d383d2dc691cb4146e9b36cb7ea6f8ac35382a3df67f6e11d35b3562e
  SHA512:
- metadata.gz: ed7be8f193d44075f701622fd991127ab32580293fb6d1ab7ccc096eeff8704312ad34cdb7a4cfd09cf8879116ede17a5b017fe15851b9ee78cb159b7e8d8b59
- data.tar.gz: f70d7a3707ed7fce123c2f9158c338cda3aa38a46abf5598f7d05c6ccd63d5a16a37ba10ff0a7a0a4cd17c0c2aeb2f07a07842a41f16322c48c7c9bae522dda4
+ metadata.gz: b4eaf631f22236035c9e29b3618a70d14487cc9e39b6885e44497ebad2a98670ce88997fdb25144b6467e0caa69a04ce7e625c9e10bc88322131181c2254a570
+ data.tar.gz: 981199fe2a0123e46ac3af54946c03d5eaa827473eae02f2e60accd0c680a0bbd40741800e05b79e890038523a1b910502a6cf4ed1f4ebf77845f4b2a2dbc5d9
data/README.md CHANGED
@@ -59,7 +59,7 @@ client = Langchain::Vectorsearch::Weaviate.new(
  )
 
  # You can instantiate any other supported vector search database:
- client = Langchain::Vectorsearch::Chroma.new(...) # `gem "chroma-db", "~> 0.3.0"`
+ client = Langchain::Vectorsearch::Chroma.new(...) # `gem "chroma-db", "~> 0.6.0"`
  client = Langchain::Vectorsearch::Hnswlib.new(...) # `gem "hnswlib", "~> 0.8.1"`
  client = Langchain::Vectorsearch::Milvus.new(...) # `gem "milvus", "~> 0.9.2"`
  client = Langchain::Vectorsearch::Pinecone.new(...) # `gem "pinecone", "~> 0.1.6"`
@@ -8,7 +8,10 @@ module Langchain
  #
  # == Available chunkers
  #
+ # - {Langchain::Chunker::RecursiveText}
  # - {Langchain::Chunker::Text}
+ # - {Langchain::Chunker::Semantic}
+ # - {Langchain::Chunker::Sentence}
  class Base
  end
  end
@@ -0,0 +1,8 @@
+ _type: prompt
+ input_variables:
+   - text
+ template: |
+   Please split the following text by topics.
+   Output only the paragraphs delimited by "---":
+
+   {text}
@@ -0,0 +1,49 @@
+ # frozen_string_literal: true
+
+ module Langchain
+   module Chunker
+     #
+     # LLM-powered semantic chunker.
+     # Semantic chunking is a technique of splitting texts by their semantic meaning, e.g.: themes, topics, and ideas.
+     # We use an LLM to accomplish this. The Anthropic LLM is highly recommended for this task as it has the longest context window (100k tokens).
+     #
+     # Usage:
+     #     Langchain::Chunker::Semantic.new(
+     #       text,
+     #       llm: Langchain::LLM::Anthropic.new(api_key: ENV["ANTHROPIC_API_KEY"])
+     #     ).chunks
+     #
+     class Semantic < Base
+       attr_reader :text, :llm, :prompt_template
+       # @param [Langchain::LLM::Base] Langchain::LLM::* instance
+       # @param [Langchain::Prompt::PromptTemplate] Optional custom prompt template
+       def initialize(text, llm:, prompt_template: nil)
+         @text = text
+         @llm = llm
+         @prompt_template = prompt_template || default_prompt_template
+       end
+
+       # @return [Array<String>]
+       def chunks
+         prompt = prompt_template.format(text: text)
+
+         # Replace static 50k limit with dynamic limit based on text length (max_tokens_to_sample)
+         completion = llm.complete(prompt: prompt, max_tokens_to_sample: 50000)
+         completion
+           .gsub("Here are the paragraphs split by topic:\n\n", "")
+           .split("---")
+           .map(&:strip)
+           .reject(&:empty?)
+       end
+
+       private
+
+       # @return [Langchain::Prompt::PromptTemplate] Default prompt template for semantic chunking
+       def default_prompt_template
+         Langchain::Prompt.load_from_path(
+           file_path: Langchain.root.join("langchain/chunker/prompts/semantic_prompt_template.yml")
+         )
+       end
+     end
+   end
+ end
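A minimal usage sketch of the new semantic chunker, following the Usage note in the class comment above (the Anthropic API key and the long_text variable are assumptions for illustration):

  # Sketch: split a long document into topic-based chunks via an LLM.
  llm = Langchain::LLM::Anthropic.new(api_key: ENV["ANTHROPIC_API_KEY"]) # assumes the key is set
  chunks = Langchain::Chunker::Semantic.new(long_text, llm: llm).chunks  # long_text is a hypothetical String
  # chunks => Array<String> of topic-delimited paragraphs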
@@ -1,9 +1,8 @@
  # frozen_string_literal: true
 
  module Langchain
-   class AIMessage < Message
-     def type
-       "ai"
+   class Conversation
+     class Context < Message
      end
    end
  end
@@ -0,0 +1,86 @@
+ # frozen_string_literal: true
+
+ module Langchain
+   class Conversation
+     class Memory
+       attr_reader :examples, :messages
+
+       # The least number of tokens we want to be under the limit by
+       TOKEN_LEEWAY = 20
+
+       def initialize(llm:, messages: [], **options)
+         @llm = llm
+         @context = nil
+         @summary = nil
+         @examples = []
+         @messages = messages
+         @strategy = options.delete(:strategy) || :truncate
+         @options = options
+       end
+
+       def set_context(message)
+         @context = message
+       end
+
+       def add_examples(examples)
+         @examples.concat examples
+       end
+
+       def append_message(message)
+         @messages.append(message)
+       end
+
+       def reduce_messages(exception)
+         case @strategy
+         when :truncate
+           truncate_messages(exception)
+         when :summarize
+           summarize_messages
+         else
+           raise "Unknown strategy: #{@options[:strategy]}"
+         end
+       end
+
+       def context
+         return if @context.nil? && @summary.nil?
+
+         Context.new([@context, @summary].compact.join("\n"))
+       end
+
+       private
+
+       def truncate_messages(exception)
+         raise exception if @messages.size == 1
+
+         token_overflow = exception.token_overflow
+
+         @messages = @messages.drop_while do |message|
+           proceed = token_overflow > -TOKEN_LEEWAY
+           token_overflow -= token_length(message.to_json, model_name, llm: @llm)
+
+           proceed
+         end
+       end
+
+       def summarize_messages
+         history = [@summary, @messages.to_json].compact.join("\n")
+         partitions = [history[0, history.size / 2], history[history.size / 2, history.size]]
+
+         @summary = partitions.map { |messages| @llm.summarize(text: messages.to_json) }.join("\n")
+
+         @messages = [@messages.last]
+       end
+
+       def partition_messages
+       end
+
+       def model_name
+         @llm.class::DEFAULTS[:chat_completion_model_name]
+       end
+
+       def token_length(content, model_name, options)
+         @llm.class::LENGTH_VALIDATOR.token_length(content, model_name, options)
+       end
+     end
+   end
+ end
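The relocated memory class keeps the two token-overflow strategies: :truncate (the default) drops the oldest messages until the prompt fits, while :summarize replaces the history with an LLM-generated summary. A sketch of choosing the strategy through Langchain::Conversation, which forwards the :memory_strategy option (see the conversation.rb hunk below); the OpenAI key is an assumption:

  # Sketch: ask memory to summarize rather than truncate when the token limit is hit.
  chat = Langchain::Conversation.new(
    llm: Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"]), # assumed key
    memory_strategy: :summarize
  )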
@@ -0,0 +1,48 @@
+ # frozen_string_literal: true
+
+ module Langchain
+   class Conversation
+     class Message
+       attr_reader :content
+
+       ROLE_MAPPING = {
+         context: "system",
+         prompt: "user",
+         response: "assistant"
+       }
+
+       def initialize(content)
+         @content = content
+       end
+
+       def role
+         ROLE_MAPPING[type]
+       end
+
+       def to_s
+         content
+       end
+
+       def to_h
+         {
+           role: role,
+           content: content
+         }
+       end
+
+       def ==(other)
+         to_json == other.to_json
+       end
+
+       def to_json(options = {})
+         to_h.to_json
+       end
+
+       private
+
+       def type
+         self.class.to_s.split("::").last.downcase.to_sym
+       end
+     end
+   end
+ end
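Message subclasses now derive their type from the class name and map it to an API role via ROLE_MAPPING. A small sketch of the hashes this produces (contents are illustrative):

  # Sketch: role/content hashes from the new Conversation message classes.
  Langchain::Conversation::Prompt.new("What does attr_reader do?").to_h
  # => {role: "user", content: "What does attr_reader do?"}
  Langchain::Conversation::Response.new("It defines a getter method.").to_h
  # => {role: "assistant", content: "It defines a getter method."}
  Langchain::Conversation::Context.new("You are a Ruby tutor").to_h
  # => {role: "system", content: "You are a Ruby tutor"}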
@@ -1,9 +1,8 @@
  # frozen_string_literal: true
 
  module Langchain
-   class HumanMessage < Message
-     def type
-       "human"
+   class Conversation
+     class Prompt < Message
      end
    end
  end
@@ -1,9 +1,8 @@
  # frozen_string_literal: true
 
  module Langchain
-   class SystemMessage < Message
-     def type
-       "system"
+   class Conversation
+     class Response < Message
      end
    end
  end
@@ -28,7 +28,7 @@ module Langchain
    @llm = llm
    @context = nil
    @examples = []
-   @memory = ConversationMemory.new(
+   @memory = ::Langchain::Conversation::Memory.new(
      llm: llm,
      messages: options.delete(:messages) || [],
      strategy: options.delete(:memory_strategy)
@@ -44,48 +44,47 @@ module Langchain
    # Set the context of the conversation. Usually used to set the model's persona.
    # @param message [String] The context of the conversation
    def set_context(message)
-     @memory.set_context SystemMessage.new(message)
+     @memory.set_context ::Langchain::Conversation::Context.new(message)
    end
 
    # Add examples to the conversation. Used to give the model a sense of the conversation.
-   # @param examples [Array<AIMessage|HumanMessage>] The examples to add to the conversation
+   # @param examples [Array<Prompt|Response>] The examples to add to the conversation
    def add_examples(examples)
      @memory.add_examples examples
    end
 
    # Message the model with a prompt and return the response.
    # @param message [String] The prompt to message the model with
-   # @return [AIMessage] The response from the model
+   # @return [Response] The response from the model
    def message(message)
-     human_message = HumanMessage.new(message)
-     @memory.append_message(human_message)
-     ai_message = llm_response(human_message)
+     @memory.append_message ::Langchain::Conversation::Prompt.new(message)
+     ai_message = ::Langchain::Conversation::Response.new(llm_response)
      @memory.append_message(ai_message)
      ai_message
    end
 
    # Messages from conversation memory
-   # @return [Array<AIMessage|HumanMessage>] The messages from the conversation memory
+   # @return [Array<Prompt|Response>] The messages from the conversation memory
    def messages
      @memory.messages
    end
 
    # Context from conversation memory
-   # @return [SystemMessage] Context from conversation memory
+   # @return [Context] Context from conversation memory
    def context
      @memory.context
    end
 
    # Examples from conversation memory
-   # @return [Array<AIMessage|HumanMessage>] Examples from the conversation memory
+   # @return [Array<Prompt|Response>] Examples from the conversation memory
    def examples
      @memory.examples
    end
 
    private
 
-   def llm_response(prompt)
-     @llm.chat(messages: @memory.messages, context: @memory.context, examples: @memory.examples, **@options, &@block)
+   def llm_response
+     @llm.chat(messages: @memory.messages.map(&:to_h), context: @memory.context&.to_s, examples: @memory.examples.map(&:to_h), **@options, &@block)
    rescue Langchain::Utils::TokenLength::TokenLimitExceeded => exception
      @memory.reduce_messages(exception)
      retry
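With these changes the conversation stores Prompt/Response/Context objects internally and hands plain hashes and strings to the LLM. A usage sketch (the OpenAI key is an assumption):

  # Sketch of the updated Conversation flow.
  chat = Langchain::Conversation.new(llm: Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"]))
  chat.set_context("You are RubyGPT, a helpful chat bot for helping people learn Ruby")
  reply = chat.message("What is a block?")
  reply.class # => Langchain::Conversation::Response
  reply.to_s  # => the model's answer as a String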
@@ -70,7 +70,7 @@ module Langchain::LLM
    # Cohere does not have a dedicated chat endpoint, so instead we call `complete()`
    def chat(...)
      response_text = complete(...)
-     Langchain::AIMessage.new(response_text)
+     ::Langchain::Conversation::Response.new(response_text)
    end
 
    # Generate a summary in English for a given text
@@ -20,7 +20,7 @@ module Langchain::LLM
    }.freeze
    LENGTH_VALIDATOR = Langchain::Utils::TokenLength::GooglePalmValidator
    ROLE_MAPPING = {
-     "human" => "user"
+     "assistant" => "ai"
    }
 
    def initialize(api_key:, default_options: {})
@@ -74,12 +74,12 @@ module Langchain::LLM
    #
    # Generate a chat completion for a given prompt
    #
-   # @param prompt [HumanMessage] The prompt to generate a chat completion for
-   # @param messages [Array<AIMessage|HumanMessage>] The messages that have been sent in the conversation
-   # @param context [SystemMessage] An initial context to provide as a system message, ie "You are RubyGPT, a helpful chat bot for helping people learn Ruby"
-   # @param examples [Array<AIMessage|HumanMessage>] Examples of messages to provide to the model. Useful for Few-Shot Prompting
+   # @param prompt [String] The prompt to generate a chat completion for
+   # @param messages [Array<Hash>] The messages that have been sent in the conversation
+   # @param context [String] An initial context to provide as a system message, ie "You are RubyGPT, a helpful chat bot for helping people learn Ruby"
+   # @param examples [Array<Hash>] Examples of messages to provide to the model. Useful for Few-Shot Prompting
    # @param options [Hash] extra parameters passed to GooglePalmAPI::Client#generate_chat_message
-   # @return [AIMessage] The chat completion
+   # @return [String] The chat completion
    #
    def chat(prompt: "", messages: [], context: "", examples: [], **options)
      raise ArgumentError.new(":prompt or :messages argument is expected") if prompt.empty? && messages.empty?
@@ -87,7 +87,7 @@ module Langchain::LLM
      default_params = {
        temperature: @defaults[:temperature],
        model: @defaults[:chat_completion_model_name],
-       context: context.to_s,
+       context: context,
        messages: compose_chat_messages(prompt: prompt, messages: messages),
        examples: compose_examples(examples)
      }
@@ -108,7 +108,7 @@ module Langchain::LLM
      response = client.generate_chat_message(**default_params)
      raise "GooglePalm API returned an error: #{response}" if response.dig("error")
 
-     Langchain::AIMessage.new(response.dig("candidates", 0, "content"))
+     response.dig("candidates", 0, "content")
    end
 
    #
@@ -150,8 +150,8 @@ module Langchain::LLM
    def compose_examples(examples)
      examples.each_slice(2).map do |example|
        {
-         input: {content: example.first.content},
-         output: {content: example.last.content}
+         input: {content: example.first[:content]},
+         output: {content: example.last[:content]}
        }
      end
    end
@@ -159,8 +159,8 @@ module Langchain::LLM
    def transform_messages(messages)
      messages.map do |message|
        {
-         author: ROLE_MAPPING.fetch(message.type, message.type),
-         content: message.content
+         author: ROLE_MAPPING.fetch(message[:role], message[:role]),
+         content: message[:content]
        }
      end
    end
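GooglePalm#chat now takes plain hashes for messages and examples plus a String context; transform_messages remaps the "assistant" role to PaLM's "ai" author. A hedged sketch of the new call shape (key and content values are illustrative):

  # Sketch: hash-based messages for the PaLM chat endpoint.
  llm = Langchain::LLM::GooglePalm.new(api_key: ENV["GOOGLE_PALM_API_KEY"]) # assumed key
  llm.chat(
    context: "You are a Ruby tutor",
    messages: [
      {role: "user", content: "Hi"},
      {role: "assistant", content: "Hello! What would you like to learn?"} # sent as author: "ai"
    ],
    prompt: "Explain blocks"
  )
  # => the completion text as a String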
@@ -11,6 +11,7 @@ module Langchain::LLM
  #
  class OpenAI < Base
    DEFAULTS = {
+     n: 1,
      temperature: 0.0,
      completion_model_name: "gpt-3.5-turbo",
      chat_completion_model_name: "gpt-3.5-turbo",
@@ -26,10 +27,6 @@ module Langchain::LLM
    ].freeze
 
    LENGTH_VALIDATOR = Langchain::Utils::TokenLength::OpenAIValidator
-   ROLE_MAPPING = {
-     "ai" => "assistant",
-     "human" => "user"
-   }
 
    attr_accessor :functions
 
@@ -117,18 +114,18 @@ module Langchain::LLM
    # },
    # ]
    #
-   # @param prompt [HumanMessage] The prompt to generate a chat completion for
-   # @param messages [Array<AIMessage|HumanMessage>] The messages that have been sent in the conversation
-   # @param context [SystemMessage] An initial context to provide as a system message, ie "You are RubyGPT, a helpful chat bot for helping people learn Ruby"
-   # @param examples [Array<AIMessage|HumanMessage>] Examples of messages to provide to the model. Useful for Few-Shot Prompting
+   # @param prompt [String] The prompt to generate a chat completion for
+   # @param messages [Array<Hash>] The messages that have been sent in the conversation
+   # @param context [String] An initial context to provide as a system message, ie "You are RubyGPT, a helpful chat bot for helping people learn Ruby"
+   # @param examples [Array<Hash>] Examples of messages to provide to the model. Useful for Few-Shot Prompting
    # @param options [Hash] extra parameters passed to OpenAI::Client#chat
-   # @yield [AIMessage] Stream responses back one String at a time
-   # @return [AIMessage] The chat completion
+   # @yield [Hash] Stream responses back one token at a time
+   # @return [String|Array<String>] The chat completion
    #
-   def chat(prompt: "", messages: [], context: "", examples: [], **options)
+   def chat(prompt: "", messages: [], context: "", examples: [], **options, &block)
      raise ArgumentError.new(":prompt or :messages argument is expected") if prompt.empty? && messages.empty?
 
-     parameters = compose_parameters @defaults[:chat_completion_model_name], options
+     parameters = compose_parameters @defaults[:chat_completion_model_name], options, &block
      parameters[:messages] = compose_chat_messages(prompt: prompt, messages: messages, context: context, examples: examples)
 
      if functions
@@ -137,25 +134,11 @@ module Langchain::LLM
        parameters[:max_tokens] = validate_max_tokens(parameters[:messages], parameters[:model])
      end
 
-     if (streaming = block_given?)
-       parameters[:stream] = proc do |chunk, _bytesize|
-         delta = chunk.dig("choices", 0, "delta")
-         content = delta["content"]
-         additional_kwargs = {function_call: delta["function_call"]}.compact
-         yield Langchain::AIMessage.new(content, additional_kwargs)
-       end
-     end
+     response = with_api_error_handling { client.chat(parameters: parameters) }
 
-     response = with_api_error_handling do
-       client.chat(parameters: parameters)
-     end
+     return if block
 
-     unless streaming
-       message = response.dig("choices", 0, "message")
-       content = message["content"]
-       additional_kwargs = {function_call: message["function_call"]}.compact
-       Langchain::AIMessage.new(content.to_s, additional_kwargs)
-     end
+     extract_response response
    end
 
    #
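Streaming now yields the raw first choice of each chunk rather than an AIMessage, and chat returns nil when a block is given. A sketch under the assumption that each streamed choice carries the usual OpenAI "delta" payload:

  # Sketch: streaming with the reworked chat() (returns nil when a block is given).
  llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"]) # assumed key
  llm.chat(prompt: "Write a haiku about Ruby") do |choice|
    print choice.dig("delta", "content") # each yielded Hash is chunk.dig("choices", 0)
  end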
@@ -191,12 +174,18 @@ module Langchain::LLM
      response.dig("choices", 0, "text")
    end
 
-   def compose_parameters(model, params)
-     default_params = {model: model, temperature: @defaults[:temperature]}
-
+   def compose_parameters(model, params, &block)
+     default_params = {model: model, temperature: @defaults[:temperature], n: @defaults[:n]}
      default_params[:stop] = params.delete(:stop_sequences) if params[:stop_sequences]
+     parameters = default_params.merge(params)
 
-     default_params.merge(params)
+     if block
+       parameters[:stream] = proc do |chunk, _bytesize|
+         yield chunk.dig("choices", 0)
+       end
+     end
+
+     parameters
    end
 
    def compose_chat_messages(prompt:, messages: [], context: "", examples: [])
@@ -206,9 +195,9 @@ module Langchain::LLM
 
      history.concat transform_messages(messages) unless messages.empty?
 
-     unless context.nil? || context.to_s.empty?
+     unless context.nil? || context.empty?
        history.reject! { |message| message[:role] == "system" }
-       history.prepend({role: "system", content: context.content})
+       history.prepend({role: "system", content: context})
      end
 
      unless prompt.empty?
@@ -225,14 +214,16 @@ module Langchain::LLM
    def transform_messages(messages)
      messages.map do |message|
        {
-         role: ROLE_MAPPING.fetch(message.type, message.type),
-         content: message.content
+         role: message[:role],
+         content: message[:content]
        }
      end
    end
 
    def with_api_error_handling
      response = yield
+     return if response.empty?
+
      raise Langchain::LLM::ApiError.new "OpenAI API error: #{response.dig("error", "message")}" if response&.dig("error")
 
      response
@@ -241,5 +232,10 @@
    def validate_max_tokens(messages, model)
      LENGTH_VALIDATOR.validate_max_tokens!(messages, model)
    end
+
+   def extract_response(response)
+     results = response.dig("choices").map { |choice| choice.dig("message", "content") }
+     (results.size == 1) ? results.first : results
+   end
  end
  end
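Because extract_response returns an Array when more than one choice comes back, passing n above the new default of 1 changes the return type. An illustrative sketch:

  # Sketch: requesting two completions; returned values are illustrative.
  llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"]) # assumed key
  llm.chat(prompt: "Suggest a name for a Ruby gem", n: 2)
  # => ["gem_name_one", "gem_name_two"]  (Array<String>; with n: 1 a single String is returned)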
@@ -84,7 +84,7 @@ module Langchain::LLM
    # Cohere does not have a dedicated chat endpoint, so instead we call `complete()`
    def chat(...)
      response_text = complete(...)
-     Langchain::AIMessage.new(response_text)
+     ::Langchain::Conversation::Response.new(response_text)
    end
 
    #
@@ -5,18 +5,17 @@ module Langchain::Vectorsearch
  #
  # Wrapper around Chroma DB
  #
- # Gem requirements: gem "chroma-db", "~> 0.3.0"
+ # Gem requirements: gem "chroma-db", "~> 0.6.0"
  #
  # Usage:
  # chroma = Langchain::Vectorsearch::Chroma.new(url:, index_name:, llm:, llm_api_key:, api_key: nil)
  #
 
  # Initialize the Chroma client
- # @param url [String] The URL of the Qdrant server
- # @param api_key [String] The API key to use
+ # @param url [String] The URL of the Chroma server
  # @param index_name [String] The name of the index to use
  # @param llm [Object] The LLM client to use
- def initialize(url:, index_name:, llm:, api_key: nil)
+ def initialize(url:, index_name:, llm:)
    depends_on "chroma-db"
 
    ::Chroma.connect_host = url
@@ -61,19 +60,19 @@ module Langchain::Vectorsearch
  end
 
  # Create the collection with the default schema
- # @return [Hash] The response from the server
+ # @return [::Chroma::Resources::Collection] Created collection
  def create_default_schema
    ::Chroma::Resources::Collection.create(index_name)
  end
 
  # Get the default schema
- # @return [Hash] The response from the server
+ # @return [::Chroma::Resources::Collection] Default schema
  def get_default_schema
    ::Chroma::Resources::Collection.get(index_name)
  end
 
  # Delete the default schema
- # @return [Hash] The response from the server
+ # @return [bool] Success or failure
  def destroy_default_schema
    ::Chroma::Resources::Collection.delete(index_name)
  end
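The Chroma client initializer (first hunk above) no longer accepts an api_key; only url, index_name and llm remain. A sketch of the updated constructor (URL and index name are placeholders):

  # Sketch: instantiating the Chroma vector search client after this change.
  client = Langchain::Vectorsearch::Chroma.new(
    url: ENV["CHROMA_URL"],  # placeholder
    index_name: "documents", # placeholder
    llm: Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
  )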
@@ -1,5 +1,5 @@
  # frozen_string_literal: true
 
  module Langchain
-   VERSION = "0.6.16"
+   VERSION = "0.6.17"
  end
data/lib/langchain.rb CHANGED
@@ -7,7 +7,6 @@ require "zeitwerk"
7
7
  loader = Zeitwerk::Loader.for_gem
8
8
  loader.ignore("#{__dir__}/langchainrb.rb")
9
9
  loader.inflector.inflect(
10
- "ai_message" => "AIMessage",
11
10
  "ai21" => "AI21",
12
11
  "ai21_validator" => "AI21Validator",
13
12
  "csv" => "CSV",
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: langchainrb
  version: !ruby/object:Gem::Version
-   version: 0.6.16
+   version: 0.6.17
  platform: ruby
  authors:
  - Andrei Bondarev
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2023-10-03 00:00:00.000000000 Z
+ date: 2023-10-10 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: baran
@@ -184,14 +184,14 @@ dependencies:
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
-       version: 0.3.0
+       version: 0.6.0
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
-       version: 0.3.0
+       version: 0.6.0
  - !ruby/object:Gem::Dependency
    name: cohere-ruby
    requirement: !ruby/object:Gem::Requirement
@@ -492,14 +492,14 @@ dependencies:
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
-       version: 0.8.7
+       version: 0.8.9
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
-       version: 0.8.7
+       version: 0.8.9
  - !ruby/object:Gem::Dependency
    name: wikipedia-client
    requirement: !ruby/object:Gem::Requirement
@@ -532,17 +532,21 @@ files:
  - lib/langchain/agent/sql_query_agent.rb
  - lib/langchain/agent/sql_query_agent/sql_query_agent_answer_prompt.yaml
  - lib/langchain/agent/sql_query_agent/sql_query_agent_sql_prompt.yaml
- - lib/langchain/ai_message.rb
  - lib/langchain/chunker/base.rb
+ - lib/langchain/chunker/prompts/semantic_prompt_template.yml
  - lib/langchain/chunker/recursive_text.rb
+ - lib/langchain/chunker/semantic.rb
  - lib/langchain/chunker/sentence.rb
  - lib/langchain/chunker/text.rb
  - lib/langchain/contextual_logger.rb
  - lib/langchain/conversation.rb
- - lib/langchain/conversation_memory.rb
+ - lib/langchain/conversation/context.rb
+ - lib/langchain/conversation/memory.rb
+ - lib/langchain/conversation/message.rb
+ - lib/langchain/conversation/prompt.rb
+ - lib/langchain/conversation/response.rb
  - lib/langchain/data.rb
  - lib/langchain/dependency_helper.rb
- - lib/langchain/human_message.rb
  - lib/langchain/llm/ai21.rb
  - lib/langchain/llm/anthropic.rb
  - lib/langchain/llm/base.rb
@@ -555,7 +559,6 @@ files:
  - lib/langchain/llm/prompts/summarize_template.yaml
  - lib/langchain/llm/replicate.rb
  - lib/langchain/loader.rb
- - lib/langchain/message.rb
  - lib/langchain/output_parsers/base.rb
  - lib/langchain/output_parsers/output_fixing_parser.rb
  - lib/langchain/output_parsers/prompts/naive_fix_prompt.yaml
@@ -575,7 +578,6 @@ files:
  - lib/langchain/prompt/loading.rb
  - lib/langchain/prompt/prompt_template.rb
  - lib/langchain/railtie.rb
- - lib/langchain/system_message.rb
  - lib/langchain/tool/base.rb
  - lib/langchain/tool/calculator.rb
  - lib/langchain/tool/database.rb
@@ -1,84 +0,0 @@
- # frozen_string_literal: true
-
- module Langchain
-   class ConversationMemory
-     attr_reader :examples, :messages
-
-     # The least number of tokens we want to be under the limit by
-     TOKEN_LEEWAY = 20
-
-     def initialize(llm:, messages: [], **options)
-       @llm = llm
-       @context = nil
-       @summary = nil
-       @examples = []
-       @messages = messages
-       @strategy = options.delete(:strategy) || :truncate
-       @options = options
-     end
-
-     def set_context(message)
-       @context = message
-     end
-
-     def add_examples(examples)
-       @examples.concat examples
-     end
-
-     def append_message(message)
-       @messages.append(message)
-     end
-
-     def reduce_messages(exception)
-       case @strategy
-       when :truncate
-         truncate_messages(exception)
-       when :summarize
-         summarize_messages
-       else
-         raise "Unknown strategy: #{@options[:strategy]}"
-       end
-     end
-
-     def context
-       return if @context.nil? && @summary.nil?
-
-       SystemMessage.new([@context, @summary].compact.join("\n"))
-     end
-
-     private
-
-     def truncate_messages(exception)
-       raise exception if @messages.size == 1
-
-       token_overflow = exception.token_overflow
-
-       @messages = @messages.drop_while do |message|
-         proceed = token_overflow > -TOKEN_LEEWAY
-         token_overflow -= token_length(message.to_json, model_name, llm: @llm)
-
-         proceed
-       end
-     end
-
-     def summarize_messages
-       history = [@summary, @messages.to_json].compact.join("\n")
-       partitions = [history[0, history.size / 2], history[history.size / 2, history.size]]
-
-       @summary = partitions.map { |messages| @llm.summarize(text: messages.to_json) }.join("\n")
-
-       @messages = [@messages.last]
-     end
-
-     def partition_messages
-     end
-
-     def model_name
-       @llm.class::DEFAULTS[:chat_completion_model_name]
-     end
-
-     def token_length(content, model_name, options)
-       @llm.class::LENGTH_VALIDATOR.token_length(content, model_name, options)
-     end
-   end
- end
@@ -1,35 +0,0 @@
- # frozen_string_literal: true
-
- module Langchain
-   class Message
-     attr_reader :content, :additional_kwargs
-
-     def initialize(content, additional_kwargs = nil)
-       @content = content
-       @additional_kwargs = additional_kwargs
-     end
-
-     def type
-       raise NotImplementedError
-     end
-
-     def to_s
-       content
-     end
-
-     def ==(other)
-       to_json == other.to_json
-     end
-
-     def to_json(options = {})
-       hash = {
-         type: type,
-         content: content
-       }
-
-       hash[:additional_kwargs] = additional_kwargs unless additional_kwargs.nil? || additional_kwargs.empty?
-
-       hash.to_json
-     end
-   end
- end