langchainrb 0.6.16 → 0.6.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +11 -0
  3. data/README.md +16 -1
  4. data/lib/langchain/active_record/hooks.rb +14 -0
  5. data/lib/langchain/agent/react_agent.rb +1 -1
  6. data/lib/langchain/agent/sql_query_agent.rb +2 -2
  7. data/lib/langchain/chunk.rb +16 -0
  8. data/lib/langchain/chunker/base.rb +7 -0
  9. data/lib/langchain/chunker/prompts/semantic_prompt_template.yml +8 -0
  10. data/lib/langchain/chunker/recursive_text.rb +5 -2
  11. data/lib/langchain/chunker/semantic.rb +52 -0
  12. data/lib/langchain/chunker/sentence.rb +4 -2
  13. data/lib/langchain/chunker/text.rb +5 -2
  14. data/lib/langchain/{ai_message.rb → conversation/context.rb} +2 -3
  15. data/lib/langchain/conversation/memory.rb +86 -0
  16. data/lib/langchain/conversation/message.rb +48 -0
  17. data/lib/langchain/{human_message.rb → conversation/prompt.rb} +2 -3
  18. data/lib/langchain/{system_message.rb → conversation/response.rb} +2 -3
  19. data/lib/langchain/conversation.rb +11 -12
  20. data/lib/langchain/llm/ai21.rb +4 -3
  21. data/lib/langchain/llm/anthropic.rb +3 -3
  22. data/lib/langchain/llm/cohere.rb +7 -6
  23. data/lib/langchain/llm/google_palm.rb +24 -20
  24. data/lib/langchain/llm/hugging_face.rb +4 -3
  25. data/lib/langchain/llm/llama_cpp.rb +1 -1
  26. data/lib/langchain/llm/ollama.rb +18 -6
  27. data/lib/langchain/llm/openai.rb +38 -41
  28. data/lib/langchain/llm/replicate.rb +7 -11
  29. data/lib/langchain/llm/response/ai21_response.rb +13 -0
  30. data/lib/langchain/llm/response/anthropic_response.rb +29 -0
  31. data/lib/langchain/llm/response/base_response.rb +79 -0
  32. data/lib/langchain/llm/response/cohere_response.rb +21 -0
  33. data/lib/langchain/llm/response/google_palm_response.rb +36 -0
  34. data/lib/langchain/llm/response/hugging_face_response.rb +13 -0
  35. data/lib/langchain/llm/response/ollama_response.rb +26 -0
  36. data/lib/langchain/llm/response/openai_response.rb +51 -0
  37. data/lib/langchain/llm/response/replicate_response.rb +28 -0
  38. data/lib/langchain/vectorsearch/base.rb +1 -1
  39. data/lib/langchain/vectorsearch/chroma.rb +11 -12
  40. data/lib/langchain/vectorsearch/hnswlib.rb +5 -5
  41. data/lib/langchain/vectorsearch/milvus.rb +2 -2
  42. data/lib/langchain/vectorsearch/pgvector.rb +3 -3
  43. data/lib/langchain/vectorsearch/pinecone.rb +10 -10
  44. data/lib/langchain/vectorsearch/qdrant.rb +5 -5
  45. data/lib/langchain/vectorsearch/weaviate.rb +6 -6
  46. data/lib/langchain/version.rb +1 -1
  47. data/lib/langchain.rb +3 -1
  48. metadata +23 -11
  49. data/lib/langchain/conversation_memory.rb +0 -84
  50. data/lib/langchain/message.rb +0 -35
data/lib/langchain/llm/response/openai_response.rb ADDED
@@ -0,0 +1,51 @@
+ # frozen_string_literal: true
+
+ module Langchain::LLM
+   class OpenAIResponse < BaseResponse
+     def model
+       raw_response["model"]
+     end
+
+     def created_at
+       if raw_response.dig("created")
+         Time.at(raw_response.dig("created"))
+       end
+     end
+
+     def completion
+       completions&.dig(0, "message", "content")
+     end
+
+     def chat_completion
+       completion
+     end
+
+     def embedding
+       embeddings&.first
+     end
+
+     def completions
+       raw_response.dig("choices")
+     end
+
+     def chat_completions
+       raw_response.dig("choices")
+     end
+
+     def embeddings
+       raw_response.dig("data")&.map { |datum| datum.dig("embedding") }
+     end
+
+     def prompt_tokens
+       raw_response.dig("usage", "prompt_tokens")
+     end
+
+     def completion_tokens
+       raw_response.dig("usage", "completion_tokens")
+     end
+
+     def total_tokens
+       raw_response.dig("usage", "total_tokens")
+     end
+   end
+ end
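
Note: the point of these new wrapper classes is that every LLM call now returns a response object with a uniform interface instead of a raw provider hash. A minimal usage sketch, assuming Langchain::LLM::OpenAI#chat now returns an OpenAIResponse (consistent with the llm/openai.rb changes listed above); return values are illustrative:

    llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
    response = llm.chat(prompt: "Hello!")
    response.chat_completion # => "Hi! How can I help you today?"
    response.model           # => "gpt-3.5-turbo"
    response.total_tokens    # => 21 (from the "usage" block of the raw response)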
data/lib/langchain/llm/response/replicate_response.rb ADDED
@@ -0,0 +1,28 @@
+ # frozen_string_literal: true
+
+ module Langchain::LLM
+   class ReplicateResponse < BaseResponse
+     def completions
+       # Response comes back as an array of strings, e.g.: ["Hi", "how ", "are ", "you?"]
+       # The first array element is missing a space at the end, so we add it manually
+       raw_response.output[0] += " "
+       [raw_response.output.join]
+     end
+
+     def completion
+       completions.first
+     end
+
+     def created_at
+       Time.parse(raw_response.created_at)
+     end
+
+     def embedding
+       embeddings.first
+     end
+
+     def embeddings
+       [raw_response.output]
+     end
+   end
+ end
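
Both wrappers subclass the new Langchain::LLM::BaseResponse (base_response.rb, +79 lines in the file list above), which defines the shared accessors (completion, embedding, created_at, token counts). A hedged sketch, assuming Langchain::LLM::Replicate#complete now returns a ReplicateResponse:

    response = replicate_llm.complete(prompt: "Hi")
    response.completion # => "Hi how are you?" (joined from the provider's array-of-strings output)
    response.created_at # => Time parsed from the provider payload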
data/lib/langchain/vectorsearch/base.rb CHANGED
@@ -184,7 +184,7 @@ module Langchain::Vectorsearch
         .flatten
         .map do |path|
           data = Langchain::Loader.new(path)&.load&.chunks
-          data.map { |chunk| chunk[:text] }
+          data.map { |chunk| chunk.text }
         end

       texts.flatten!
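
Context for this one-line change: loader chunks are now Langchain::Chunk instances (the new lib/langchain/chunk.rb above) rather than plain hashes, so the text is read through an attribute. A before/after sketch:

    chunk = Langchain::Loader.new(path).load.chunks.first
    chunk[:text] # 0.6.16: chunks were hashes
    chunk.text   # 0.6.18: chunks are Langchain::Chunk objects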
data/lib/langchain/vectorsearch/chroma.rb CHANGED
@@ -5,18 +5,17 @@ module Langchain::Vectorsearch
   #
   # Wrapper around Chroma DB
   #
-  # Gem requirements: gem "chroma-db", "~> 0.3.0"
+  # Gem requirements: gem "chroma-db", "~> 0.6.0"
   #
   # Usage:
   # chroma = Langchain::Vectorsearch::Chroma.new(url:, index_name:, llm:, llm_api_key:, api_key: nil)
   #

   # Initialize the Chroma client
-  # @param url [String] The URL of the Qdrant server
-  # @param api_key [String] The API key to use
+  # @param url [String] The URL of the Chroma server
   # @param index_name [String] The name of the index to use
   # @param llm [Object] The LLM client to use
-  def initialize(url:, index_name:, llm:, api_key: nil)
+  def initialize(url:, index_name:, llm:)
     depends_on "chroma-db"

     ::Chroma.connect_host = url
@@ -29,13 +28,13 @@ module Langchain::Vectorsearch
   end

   # Add a list of texts to the index
-  # @param texts [Array] The list of texts to add
+  # @param texts [Array<String>] The list of texts to add
   # @return [Hash] The response from the server
   def add_texts(texts:, ids: [])
     embeddings = Array(texts).map.with_index do |text, i|
       ::Chroma::Resources::Embedding.new(
         id: ids[i] ? ids[i].to_s : SecureRandom.uuid,
-        embedding: llm.embed(text: text),
+        embedding: llm.embed(text: text).embedding,
         # TODO: Add support for passing metadata
         metadata: {}, # metadatas[index],
         document: text # Do we actually need to store the whole original document?
@@ -50,7 +49,7 @@ module Langchain::Vectorsearch
     embeddings = Array(texts).map.with_index do |text, i|
       ::Chroma::Resources::Embedding.new(
         id: ids[i].to_s,
-        embedding: llm.embed(text: text),
+        embedding: llm.embed(text: text).embedding,
         # TODO: Add support for passing metadata
         metadata: [], # metadatas[index],
         document: text # Do we actually need to store the whole original document?
@@ -61,19 +60,19 @@ module Langchain::Vectorsearch
   end

   # Create the collection with the default schema
-  # @return [Hash] The response from the server
+  # @return [::Chroma::Resources::Collection] Created collection
   def create_default_schema
     ::Chroma::Resources::Collection.create(index_name)
   end

   # Get the default schema
-  # @return [Hash] The response from the server
+  # @return [::Chroma::Resources::Collection] Default schema
   def get_default_schema
     ::Chroma::Resources::Collection.get(index_name)
   end

   # Delete the default schema
-  # @return [Hash] The response from the server
+  # @return [bool] Success or failure
   def destroy_default_schema
     ::Chroma::Resources::Collection.delete(index_name)
   end
@@ -86,7 +85,7 @@ module Langchain::Vectorsearch
     query:,
     k: 4
   )
-    embedding = llm.embed(text: query)
+    embedding = llm.embed(text: query).embedding

     similarity_search_by_vector(
       embedding: embedding,
@@ -95,7 +94,7 @@ module Langchain::Vectorsearch
   end

   # Search for similar texts by embedding
-  # @param embedding [Array] The embedding to search for
+  # @param embedding [Array<Float>] The embedding to search for
   # @param k [Integer] The number of results to return
   # @return [Chroma::Resources::Embedding] The response from the server
   def similarity_search_by_vector(
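
The recurring edit in this and every vectorsearch adapter below is the same one: llm.embed(text:) now returns a response wrapper, so adapters unwrap the vector explicitly. A minimal sketch:

    response = llm.embed(text: "Ruby is a programmer's best friend")
    response.embedding # => Array<Float>, i.e. what llm.embed returned directly in 0.6.16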
data/lib/langchain/vectorsearch/hnswlib.rb CHANGED
@@ -35,15 +35,15 @@ module Langchain::Vectorsearch
   #
   # Add a list of texts and corresponding IDs to the index
   #
-  # @param texts [Array] The list of texts to add
-  # @param ids [Array] The list of corresponding IDs (integers) to the texts
+  # @param texts [Array<String>] The list of texts to add
+  # @param ids [Array<Integer>] The list of corresponding IDs (integers) to the texts
   # @return [Boolean] The response from the HNSW library
   #
   def add_texts(texts:, ids:)
     resize_index(texts.size)

     Array(texts).each_with_index do |text, i|
-      embedding = llm.embed(text: text)
+      embedding = llm.embed(text: text).embedding

       client.add_point(embedding, ids[i])
     end
@@ -64,7 +64,7 @@ module Langchain::Vectorsearch
     query:,
     k: 4
   )
-    embedding = llm.embed(text: query)
+    embedding = llm.embed(text: query).embedding

     similarity_search_by_vector(
       embedding: embedding,
@@ -75,7 +75,7 @@ module Langchain::Vectorsearch
   #
   # Search for the K nearest neighbors of a given vector
   #
-  # @param embedding [Array] The embedding to search for
+  # @param embedding [Array<Float>] The embedding to search for
   # @param k [Integer] The number of results to return
   # @return [Array] Results in the format `[[id1, distance3], [id2, distance2]]`
   #
data/lib/langchain/vectorsearch/milvus.rb CHANGED
@@ -32,7 +32,7 @@ module Langchain::Vectorsearch
         }, {
           field_name: "vectors",
           type: ::Milvus::DATA_TYPES["float_vector"],
-          field: Array(texts).map { |text| llm.embed(text: text) }
+          field: Array(texts).map { |text| llm.embed(text: text).embedding }
         }
       ]
     )
@@ -111,7 +111,7 @@ module Langchain::Vectorsearch
   end

   def similarity_search(query:, k: 4)
-    embedding = llm.embed(text: query)
+    embedding = llm.embed(text: query).embedding

     similarity_search_by_vector(
       embedding: embedding,
data/lib/langchain/vectorsearch/pgvector.rb CHANGED
@@ -52,7 +52,7 @@ module Langchain::Vectorsearch
   # the added or updated texts.
   def upsert_texts(texts:, ids:)
     data = texts.zip(ids).flat_map do |(text, id)|
-      {id: id, content: text, vectors: llm.embed(text: text).to_s, namespace: namespace}
+      {id: id, content: text, vectors: llm.embed(text: text).embedding.to_s, namespace: namespace}
     end
     # @db[table_name.to_sym].multi_insert(data, return: :primary_key)
     @db[table_name.to_sym]
@@ -70,7 +70,7 @@ module Langchain::Vectorsearch
   def add_texts(texts:, ids: nil)
     if ids.nil? || ids.empty?
       data = texts.map do |text|
-        {content: text, vectors: llm.embed(text: text).to_s, namespace: namespace}
+        {content: text, vectors: llm.embed(text: text).embedding.to_s, namespace: namespace}
       end

       @db[table_name.to_sym].multi_insert(data, return: :primary_key)
@@ -110,7 +110,7 @@ module Langchain::Vectorsearch
   # @param k [Integer] The number of top results to return
   # @return [Array<Hash>] The results of the search
   def similarity_search(query:, k: 4)
-    embedding = llm.embed(text: query)
+    embedding = llm.embed(text: query).embedding

     similarity_search_by_vector(
       embedding: embedding,
data/lib/langchain/vectorsearch/pinecone.rb CHANGED
@@ -31,7 +31,7 @@ module Langchain::Vectorsearch
   end

   # Find records by ids
-  # @param ids [Array] The ids to find
+  # @param ids [Array<Integer>] The ids to find
   # @param namespace String The namespace to search through
   # @return [Hash] The response from the server
   def find(ids: [], namespace: "")
@@ -44,8 +44,8 @@ module Langchain::Vectorsearch
   end

   # Add a list of texts to the index
-  # @param texts [Array] The list of texts to add
-  # @param ids [Array] The list of IDs to add
+  # @param texts [Array<String>] The list of texts to add
+  # @param ids [Array<Integer>] The list of IDs to add
   # @param namespace [String] The namespace to add the texts to
   # @param metadata [Hash] The metadata to use for the texts
   # @return [Hash] The response from the server
@@ -54,7 +54,7 @@ module Langchain::Vectorsearch
     {
       id: ids[i] ? ids[i].to_s : SecureRandom.uuid,
       metadata: metadata || {content: text},
-      values: llm.embed(text: text)
+      values: llm.embed(text: text).embedding
     }
   end

@@ -70,7 +70,7 @@ module Langchain::Vectorsearch
       .flatten
       .map do |path|
         data = Langchain::Loader.new(path)&.load&.chunks
-        data.map { |chunk| chunk[:text] }
+        data.map { |chunk| chunk.text }
       end

     texts.flatten!
@@ -79,8 +79,8 @@ module Langchain::Vectorsearch
   end

   # Update a list of texts in the index
-  # @param texts [Array] The list of texts to update
-  # @param ids [Array] The list of IDs to update
+  # @param texts [Array<String>] The list of texts to update
+  # @param ids [Array<Integer>] The list of IDs to update
   # @param namespace [String] The namespace to update the texts in
   # @param metadata [Hash] The metadata to use for the texts
   # @return [Array] The response from the server
@@ -90,7 +90,7 @@ module Langchain::Vectorsearch
     index.update(
       namespace: namespace,
       id: ids[i].to_s,
-      values: llm.embed(text: text),
+      values: llm.embed(text: text).embedding,
       set_metadata: metadata
     )
   end
@@ -130,7 +130,7 @@ module Langchain::Vectorsearch
     namespace: "",
     filter: nil
   )
-    embedding = llm.embed(text: query)
+    embedding = llm.embed(text: query).embedding

     similarity_search_by_vector(
       embedding: embedding,
@@ -141,7 +141,7 @@ module Langchain::Vectorsearch
   end

   # Search for similar texts by embedding
-  # @param embedding [Array] The embedding to search for
+  # @param embedding [Array<Float>] The embedding to search for
   # @param k [Integer] The number of results to return
   # @param namespace [String] The namespace to search in
   # @param filter [String] The filter to use
data/lib/langchain/vectorsearch/qdrant.rb CHANGED
@@ -29,7 +29,7 @@ module Langchain::Vectorsearch
   end

   # Find records by ids
-  # @param ids [Array] The ids to find
+  # @param ids [Array<Integer>] The ids to find
   # @return [Hash] The response from the server
   def find(ids: [])
     client.points.get_all(
@@ -41,7 +41,7 @@ module Langchain::Vectorsearch
   end

   # Add a list of texts to the index
-  # @param texts [Array] The list of texts to add
+  # @param texts [Array<String>] The list of texts to add
   # @return [Hash] The response from the server
   def add_texts(texts:, ids: [])
     batch = {ids: [], vectors: [], payloads: []}
@@ -49,7 +49,7 @@ module Langchain::Vectorsearch
     Array(texts).each_with_index do |text, i|
       id = ids[i] || SecureRandom.uuid
       batch[:ids].push(id)
-      batch[:vectors].push(llm.embed(text: text))
+      batch[:vectors].push(llm.embed(text: text).embedding)
       batch[:payloads].push({content: text})
     end

@@ -95,7 +95,7 @@ module Langchain::Vectorsearch
     query:,
     k: 4
   )
-    embedding = llm.embed(text: query)
+    embedding = llm.embed(text: query).embedding

     similarity_search_by_vector(
       embedding: embedding,
@@ -104,7 +104,7 @@ module Langchain::Vectorsearch
   end

   # Search for similar texts by embedding
-  # @param embedding [Array] The embedding to search for
+  # @param embedding [Array<Float>] The embedding to search for
   # @param k [Integer] The number of results to return
   # @return [Hash] The response from the server
   def similarity_search_by_vector(
data/lib/langchain/vectorsearch/weaviate.rb CHANGED
@@ -32,7 +32,7 @@ module Langchain::Vectorsearch
   end

   # Add a list of texts to the index
-  # @param texts [Array] The list of texts to add
+  # @param texts [Array<String>] The list of texts to add
   # @return [Hash] The response from the server
   def add_texts(texts:, ids: [])
     client.objects.batch_create(
@@ -41,7 +41,7 @@ module Langchain::Vectorsearch
   end

   # Update a list of texts in the index
-  # @param texts [Array] The list of texts to update
+  # @param texts [Array<String>] The list of texts to update
   # @return [Hash] The response from the server
   def update_texts(texts:, ids:)
     uuids = []
@@ -65,7 +65,7 @@ module Langchain::Vectorsearch
         __id: ids[i].to_s,
         content: text
       },
-      vector: llm.embed(text: text)
+      vector: llm.embed(text: text).embedding
     )
   end
 end
@@ -101,13 +101,13 @@ module Langchain::Vectorsearch
   # @param k [Integer|String] The number of results to return
   # @return [Hash] The search results
   def similarity_search(query:, k: 4)
-    embedding = llm.embed(text: query)
+    embedding = llm.embed(text: query).embedding

     similarity_search_by_vector(embedding: embedding, k: k)
   end

   # Return documents similar to the vector
-  # @param embedding [Array] The vector to search for
+  # @param embedding [Array<Float>] The vector to search for
   # @param k [Integer|String] The number of results to return
   # @return [Hash] The search results
   def similarity_search_by_vector(embedding:, k: 4)
@@ -154,7 +154,7 @@ module Langchain::Vectorsearch
       __id: id.to_s,
       content: text
     },
-    vector: llm.embed(text: text)
+    vector: llm.embed(text: text).embedding
   }
 end
 end
data/lib/langchain/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true

 module Langchain
-  VERSION = "0.6.16"
+  VERSION = "0.6.18"
 end
data/lib/langchain.rb CHANGED
@@ -7,8 +7,8 @@ require "zeitwerk"
 loader = Zeitwerk::Loader.for_gem
 loader.ignore("#{__dir__}/langchainrb.rb")
 loader.inflector.inflect(
-  "ai_message" => "AIMessage",
   "ai21" => "AI21",
+  "ai21_response" => "AI21Response",
   "ai21_validator" => "AI21Validator",
   "csv" => "CSV",
   "html" => "HTML",
@@ -17,10 +17,12 @@ loader.inflector.inflect(
   "llm" => "LLM",
   "openai" => "OpenAI",
   "openai_validator" => "OpenAIValidator",
+  "openai_response" => "OpenAIResponse",
   "pdf" => "PDF",
   "react_agent" => "ReActAgent",
   "sql_query_agent" => "SQLQueryAgent"
 )
+loader.collapse("#{__dir__}/langchain/llm/response")
 loader.setup

 # Langchain.rb a is library for building LLM-backed Ruby applications. It is an abstraction layer that sits on top of the emerging AI-related tools that makes it easy for developers to consume and string those services together.
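
The new loader.collapse call tells Zeitwerk to treat llm/response/ as a collapsed directory: the "response" path segment contributes no namespace, so the files added in this release define constants directly under Langchain::LLM. The resulting mapping (standard Zeitwerk collapse behavior, not shown explicitly in this diff):

    # lib/langchain/llm/response/openai_response.rb
    #   without collapse => Langchain::LLM::Response::OpenAIResponse
    #   with collapse    => Langchain::LLM::OpenAIResponse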
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: langchainrb
 version: !ruby/object:Gem::Version
-  version: 0.6.16
+  version: 0.6.18
 platform: ruby
 authors:
 - Andrei Bondarev
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2023-10-03 00:00:00.000000000 Z
+date: 2023-10-17 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: baran
@@ -184,14 +184,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.3.0
+        version: 0.6.0
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.3.0
+        version: 0.6.0
 - !ruby/object:Gem::Dependency
   name: cohere-ruby
   requirement: !ruby/object:Gem::Requirement
@@ -492,14 +492,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.8.7
+        version: 0.8.9
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.8.7
+        version: 0.8.9
 - !ruby/object:Gem::Dependency
   name: wikipedia-client
   requirement: !ruby/object:Gem::Requirement
@@ -532,17 +532,22 @@ files:
 - lib/langchain/agent/sql_query_agent.rb
 - lib/langchain/agent/sql_query_agent/sql_query_agent_answer_prompt.yaml
 - lib/langchain/agent/sql_query_agent/sql_query_agent_sql_prompt.yaml
-- lib/langchain/ai_message.rb
+- lib/langchain/chunk.rb
 - lib/langchain/chunker/base.rb
+- lib/langchain/chunker/prompts/semantic_prompt_template.yml
 - lib/langchain/chunker/recursive_text.rb
+- lib/langchain/chunker/semantic.rb
 - lib/langchain/chunker/sentence.rb
 - lib/langchain/chunker/text.rb
 - lib/langchain/contextual_logger.rb
 - lib/langchain/conversation.rb
-- lib/langchain/conversation_memory.rb
+- lib/langchain/conversation/context.rb
+- lib/langchain/conversation/memory.rb
+- lib/langchain/conversation/message.rb
+- lib/langchain/conversation/prompt.rb
+- lib/langchain/conversation/response.rb
 - lib/langchain/data.rb
 - lib/langchain/dependency_helper.rb
-- lib/langchain/human_message.rb
 - lib/langchain/llm/ai21.rb
 - lib/langchain/llm/anthropic.rb
 - lib/langchain/llm/base.rb
@@ -554,8 +559,16 @@ files:
 - lib/langchain/llm/openai.rb
 - lib/langchain/llm/prompts/summarize_template.yaml
 - lib/langchain/llm/replicate.rb
+- lib/langchain/llm/response/ai21_response.rb
+- lib/langchain/llm/response/anthropic_response.rb
+- lib/langchain/llm/response/base_response.rb
+- lib/langchain/llm/response/cohere_response.rb
+- lib/langchain/llm/response/google_palm_response.rb
+- lib/langchain/llm/response/hugging_face_response.rb
+- lib/langchain/llm/response/ollama_response.rb
+- lib/langchain/llm/response/openai_response.rb
+- lib/langchain/llm/response/replicate_response.rb
 - lib/langchain/loader.rb
-- lib/langchain/message.rb
 - lib/langchain/output_parsers/base.rb
 - lib/langchain/output_parsers/output_fixing_parser.rb
 - lib/langchain/output_parsers/prompts/naive_fix_prompt.yaml
575
588
  - lib/langchain/prompt/loading.rb
576
589
  - lib/langchain/prompt/prompt_template.rb
577
590
  - lib/langchain/railtie.rb
578
- - lib/langchain/system_message.rb
579
591
  - lib/langchain/tool/base.rb
580
592
  - lib/langchain/tool/calculator.rb
581
593
  - lib/langchain/tool/database.rb
data/lib/langchain/conversation_memory.rb DELETED
@@ -1,84 +0,0 @@
- # frozen_string_literal: true
-
- module Langchain
-   class ConversationMemory
-     attr_reader :examples, :messages
-
-     # The least number of tokens we want to be under the limit by
-     TOKEN_LEEWAY = 20
-
-     def initialize(llm:, messages: [], **options)
-       @llm = llm
-       @context = nil
-       @summary = nil
-       @examples = []
-       @messages = messages
-       @strategy = options.delete(:strategy) || :truncate
-       @options = options
-     end
-
-     def set_context(message)
-       @context = message
-     end
-
-     def add_examples(examples)
-       @examples.concat examples
-     end
-
-     def append_message(message)
-       @messages.append(message)
-     end
-
-     def reduce_messages(exception)
-       case @strategy
-       when :truncate
-         truncate_messages(exception)
-       when :summarize
-         summarize_messages
-       else
-         raise "Unknown strategy: #{@options[:strategy]}"
-       end
-     end
-
-     def context
-       return if @context.nil? && @summary.nil?
-
-       SystemMessage.new([@context, @summary].compact.join("\n"))
-     end
-
-     private
-
-     def truncate_messages(exception)
-       raise exception if @messages.size == 1
-
-       token_overflow = exception.token_overflow
-
-       @messages = @messages.drop_while do |message|
-         proceed = token_overflow > -TOKEN_LEEWAY
-         token_overflow -= token_length(message.to_json, model_name, llm: @llm)
-
-         proceed
-       end
-     end
-
-     def summarize_messages
-       history = [@summary, @messages.to_json].compact.join("\n")
-       partitions = [history[0, history.size / 2], history[history.size / 2, history.size]]
-
-       @summary = partitions.map { |messages| @llm.summarize(text: messages.to_json) }.join("\n")
-
-       @messages = [@messages.last]
-     end
-
-     def partition_messages
-     end
-
-     def model_name
-       @llm.class::DEFAULTS[:chat_completion_model_name]
-     end
-
-     def token_length(content, model_name, options)
-       @llm.class::LENGTH_VALIDATOR.token_length(content, model_name, options)
-     end
-   end
- end
data/lib/langchain/message.rb DELETED
@@ -1,35 +0,0 @@
- # frozen_string_literal: true
-
- module Langchain
-   class Message
-     attr_reader :content, :additional_kwargs
-
-     def initialize(content, additional_kwargs = nil)
-       @content = content
-       @additional_kwargs = additional_kwargs
-     end
-
-     def type
-       raise NotImplementedError
-     end
-
-     def to_s
-       content
-     end
-
-     def ==(other)
-       to_json == other.to_json
-     end
-
-     def to_json(options = {})
-       hash = {
-         type: type,
-         content: content
-       }
-
-       hash[:additional_kwargs] = additional_kwargs unless additional_kwargs.nil? || additional_kwargs.empty?
-
-       hash.to_json
-     end
-   end
- end
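
These two deletions are the flip side of the new Langchain::Conversation namespace in the file list above: message and memory handling moved to conversation/message.rb and conversation/memory.rb. A sketch of the constant migration, with the role-based pairings inferred from the class names rather than spelled out in this diff:

    Langchain::Message            # => Langchain::Conversation::Message
    Langchain::ConversationMemory # => Langchain::Conversation::Memory
    Langchain::HumanMessage       # => Langchain::Conversation::Prompt
    Langchain::AIMessage          # => Langchain::Conversation::Response
    Langchain::SystemMessage      # => Langchain::Conversation::Context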