langchainrb 0.3.13 → 0.3.15

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 2ee811b2bac8fadea4d90c4212363a901829a4aac219da0f2a2dcbe7c6f59c5b
-   data.tar.gz: 8fa32e6df4aaf69cb6d29977913c1b8a30d6f65b777b1f90c8a7f504d869ca8f
+   metadata.gz: 4f855e3c0e1f0d7b59e0255004a1a806c7048da6d3fe0a8ddf10be68e36ed9ba
+   data.tar.gz: 5758c90205c3e2bea420cf7fa0dec07638917beced60bd0482e1d803ced96c07
  SHA512:
-   metadata.gz: cbb7e0c975333248c01082a47f7096fb9d6807c3b7619424eb9348238008d7b4257518287d9358114bf4e3a589349520ebf71ace00bf1fe8906afd27e8b1418a
-   data.tar.gz: 759444abe0b17518c6ef31fed6980f6bc0d3d096606860c4d6fddb8baeda4e0a23fc3909e42eba0f32912a786abec76cac54384533db2787e05d741f0907fa1d
+   metadata.gz: ec26f8c4257a6949d829d7f68d2175943b80c2837739bdbead8e6b61891a6738cf84ec1934caff777b63dee39f3d0111e8f29a81c37c125be1cd3ae8137b6968
+   data.tar.gz: 31bb1aa0296dbbc8e1a1c6c2bc7236c92bf3e627935c752bd527c7f7d769da69f11c6bb63de6e0b4e0d1d8d0c739744045669220c35e253ae83eaa0e23482e6a
data/CHANGELOG.md CHANGED
@@ -1,5 +1,20 @@
  ## [Unreleased]

+ ## [0.3.15] - 2023-05-30
+ - Drop Ruby 2.7 support. It had reached EOL.
+ - Bump pgvector-ruby to 0.2
+ - 🚚 Loaders
+   - Support for options and block to be passed to CSV processor
+
+ ## [0.3.14] - 2023-05-28
+ - 🔍 Vectorsearch
+   - Not relying on Weaviate modules anymore
+   - Adding missing specs for Qdrant and Milvus classes
+ - 🚚 Loaders
+   - Add Langchain::Data result object for data loaders
+ - 🗣️ LLMs
+   - Add `summarize()` method to the LLMs
+
  ## [0.3.13] - 2023-05-26
  - 🔍 Vectorsearch
    - Pgvector support
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
  PATH
    remote: .
    specs:
-     langchainrb (0.3.13)
+     langchainrb (0.3.15)

  GEM
    remote: https://rubygems.org/
@@ -171,7 +171,7 @@ GEM
        ruby-rc4
        ttfunk
      pg (1.5.3)
-     pgvector (0.1.1)
+     pgvector (0.2.0)
      pinecone (0.1.71)
        dry-struct (~> 1.6.0)
        dry-validation (~> 1.10.0)
@@ -298,7 +298,7 @@ DEPENDENCIES
    nokogiri (~> 1.13)
    pdf-reader (~> 1.4)
    pg (~> 1.5)
-   pgvector (< 0.2)
+   pgvector (~> 0.2)
    pinecone (~> 0.1.6)
    pry-byebug (~> 3.10.0)
    qdrant-ruby (~> 0.9.0)
data/README.md CHANGED
@@ -321,6 +321,7 @@ Langchain.logger.level = :info
  [<img style="border-radius:50%" alt="Andrei Bondarev" src="https://avatars.githubusercontent.com/u/541665?v=4" width="80" height="80" class="avatar">](https://github.com/andreibondarev)
  [<img style="border-radius:50%" alt="Rafael Figueiredo" src="https://avatars.githubusercontent.com/u/35845775?v=4" width="80" height="80" class="avatar">](https://github.com/rafaelqfigueiredo)
  [<img style="border-radius:50%" alt="Ricky Chilcott" src="https://avatars.githubusercontent.com/u/445759?v=4" width="80" height="80" class="avatar">](https://github.com/rickychilcott)
+ [<img style="border-radius:50%" alt="Alex Chaplinsky" src="https://avatars.githubusercontent.com/u/695947?v=4" width="80" height="80" class="avatar">](https://github.com/alchaplinsky)

  (Criteria for becoming an Honorary Contributor or Core Contributor is pending...)

data/lib/langchain/data.rb ADDED
@@ -0,0 +1,16 @@
+ # frozen_string_literal: true
+
+ module Langchain
+   class Data
+     attr_reader :source
+
+     def initialize(data, options = {})
+       @source = options[:source]
+       @data = data
+     end
+
+     def value
+       @data
+     end
+   end
+ end
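
The new `Langchain::Data` object is a thin wrapper pairing parsed content with its origin. A minimal sketch of how it behaves (the string and path below are illustrative placeholders):

    require "langchain"

    # Wrap already-parsed content; the :source option records where it came from.
    data = Langchain::Data.new("Lorem ipsum dolor sit amet", source: "docs/sample.txt")

    data.value  #=> "Lorem ipsum dolor sit amet"
    data.source #=> "docs/sample.txt"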
data/lib/langchain/loader.rb CHANGED
@@ -14,14 +14,15 @@ module Langchain
    # Equivalent to Langchain::Loader.new(path).load
    # @param path [String | Pathname] path to file or url
    # @return [String] file content
-   def self.load(path)
-     new(path).load
+   def self.load(path, options = {}, &block)
+     new(path, options).load(&block)
    end

    # Initialize Langchain::Loader
    # @param path [String | Pathname] path to file or url
    # @return [Langchain::Loader] loader instance
-   def initialize(path)
+   def initialize(path, options = {})
+     @options = options
      @path = path
    end

@@ -35,38 +36,38 @@ module Langchain

    # Load data from a file or url
    # @return [String] file content
-   def load
-     url? ? from_url(@path) : from_path(@path)
+   def load(&block)
+     @raw_data = url? ? load_from_url : load_from_path
+
+     data = if block
+       yield @raw_data.read, @options
+     else
+       processor_klass.new(@options).parse(@raw_data)
+     end
+
+     Langchain::Data.new(data, source: @path)
    end

    private

-   def from_url(url)
-     process do
-       data = URI.parse(url).open
-       processor = find_processor(:CONTENT_TYPES, data.content_type)
-       [data, processor]
-     end
+   def load_from_url
+     URI.parse(@path).open
    end

-   def from_path(path)
-     raise FileNotFound unless File.exist?(path)
-
-     process do
-       [File.open(path), find_processor(:EXTENSIONS, File.extname(path))]
-     end
+   def load_from_path
+     raise FileNotFound unless File.exist?(@path)
+
+     File.open(@path)
    end

-   def process(&block)
-     data, processor = yield
-
-     raise UnknownFormatError unless processor
-
-     Langchain::Processors.const_get(processor).new.parse(data)
+   def processor_klass
+     raise UnknownFormatError unless (kind = find_processor)
+
+     Langchain::Processors.const_get(kind)
    end

-   def find_processor(constant, value)
-     processors.find { |klass| processor_matches? "#{klass}::#{constant}", value }
+   def find_processor
+     processors.find { |klass| processor_matches? "#{klass}::#{lookup_constant}", source_type }
    end

    def processor_matches?(constant, value)
@@ -76,5 +77,13 @@ module Langchain
    def processors
      Langchain::Processors.constants
    end
+
+   def source_type
+     url? ? @raw_data.content_type : File.extname(@path)
+   end
+
+   def lookup_constant
+     url? ? :CONTENT_TYPES : :EXTENSIONS
+   end
  end
end
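
Taken together, the loader changes let callers forward options to the matched processor or take over parsing entirely with a block. A hedged sketch of both call styles (file names are placeholders; the `col_sep` option is handled by the CSV processor change further down):

    require "langchain"

    # Options are forwarded to the processor selected by file extension or
    # content type; here the CSV processor receives col_sep.
    data = Langchain::Loader.load("export.csv", col_sep: ";")
    data.value   # parsed rows from the CSV processor
    data.source  # "export.csv"

    # A block bypasses processor lookup entirely: it receives the raw content
    # and the options hash, and its return value is wrapped in Langchain::Data.
    Langchain::Loader.load("export.csv") do |raw_text, options|
      raw_text.lines.first(5)
    end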
data/lib/langchain/processors/base.rb CHANGED
@@ -6,6 +6,10 @@ module Langchain
      EXTENSIONS = []
      CONTENT_TYPES = []

+     def initialize(options = {})
+       @options = options
+     end
+
      def parse(data)
        raise NotImplementedError
      end
data/lib/langchain/processors/csv.rb CHANGED
@@ -12,10 +12,16 @@ module Langchain
      # @param [File] data
      # @return [Array of Hash]
      def parse(data)
-       ::CSV.new(data.read).map do |row|
+       ::CSV.new(data.read, col_sep: separator).map do |row|
          row.map(&:strip)
        end
      end
+
+     private
+
+     def separator
+       @options[:col_sep] || ","
+     end
    end
  end
end
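
The processor can also be driven directly; without a `:col_sep` option the separator falls back to a comma. A small sketch (the file and its contents are placeholders):

    require "langchain"
    require "csv" # stdlib the processor parses with

    file = File.open("semicolon_separated.csv") # contains "a;b\nc;d"
    Langchain::Processors::CSV.new(col_sep: ";").parse(file)
    #=> [["a", "b"], ["c", "d"]]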
data/lib/langchain/processors/docx.rb CHANGED
@@ -6,7 +6,7 @@ module Langchain
      EXTENSIONS = [".docx"]
      CONTENT_TYPES = ["application/vnd.openxmlformats-officedocument.wordprocessingml.document"]

-     def initialize
+     def initialize(*)
        depends_on "docx"
        require "docx"
      end
data/lib/langchain/processors/html.rb CHANGED
@@ -9,7 +9,7 @@ module Langchain
      # We only look for headings and paragraphs
      TEXT_CONTENT_TAGS = %w[h1 h2 h3 h4 h5 h6 p]

-     def initialize
+     def initialize(*)
        depends_on "nokogiri"
        require "nokogiri"
      end
data/lib/langchain/processors/pdf.rb CHANGED
@@ -6,7 +6,7 @@ module Langchain
      EXTENSIONS = [".pdf"]
      CONTENT_TYPES = ["application/pdf"]

-     def initialize
+     def initialize(*)
        depends_on "pdf-reader"
        require "pdf-reader"
      end
data/lib/langchain.rb CHANGED
@@ -17,6 +17,7 @@ module Langchain
  @root = Pathname.new(__dir__)

  autoload :Loader, "langchain/loader"
+ autoload :Data, "langchain/data"

  module Processors
    autoload :Base, "langchain/processors/base"
data/lib/llm/base.rb CHANGED
@@ -33,6 +33,11 @@ module LLM
      raise NotImplementedError, "#{self.class.name} does not support generating embeddings"
    end

+   # Method supported by an LLM that summarizes a given text
+   def summarize(...)
+     raise NotImplementedError, "#{self.class.name} does not support summarization"
+   end
+
    # Ensure that the LLM value passed in is supported
    # @param llm [Symbol] The LLM to use
    def self.validate_llm!(llm:)
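
The stub uses Ruby 3's `(...)` argument forwarding, so a subclass that does not override it fails loudly rather than silently. A sketch of the intended behavior (Hugging Face is simply an adapter without `summarize` in this release; assumes the hugging-face gem is installed and the env var is a placeholder):

    require "langchain"

    llm = LLM::HuggingFace.new(api_key: ENV["HUGGING_FACE_API_KEY"])
    llm.summarize(text: "...")
    # raises NotImplementedError: "LLM::HuggingFace does not support summarization"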
data/lib/llm/cohere.rb CHANGED
@@ -16,9 +16,12 @@ module LLM
      @client = ::Cohere::Client.new(api_key: api_key)
    end

+   #
    # Generate an embedding for a given text
+   #
    # @param text [String] The text to generate an embedding for
    # @return [Hash] The embedding
+   #
    def embed(text:)
      response = client.embed(
        texts: [text],
@@ -27,9 +30,12 @@ module LLM
      response.dig("embeddings").first
    end

+   #
    # Generate a completion for a given prompt
+   #
    # @param prompt [String] The prompt to generate a completion for
    # @return [Hash] The completion
+   #
    def complete(prompt:, **params)
      default_params = {
        prompt: prompt,
@@ -51,5 +57,16 @@ module LLM
    def chat(...)
      complete(...)
    end
+
+   # Generate a summary in English for a given text
+   #
+   # More parameters available to extend this method with: https://github.com/andreibondarev/cohere-ruby/blob/0.9.4/lib/cohere/client.rb#L107-L115
+   #
+   # @param text [String] The text to generate a summary for
+   # @return [String] The summary
+   def summarize(text:)
+     response = client.summarize(text: text)
+     response.dig("summary")
+   end
  end
end
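
Cohere is the one adapter whose `summarize` maps straight onto a hosted summarization endpoint rather than a prompt template. A usage sketch (the API key and input text are placeholders):

    require "langchain"

    cohere = LLM::Cohere.new(api_key: ENV["COHERE_API_KEY"])
    cohere.summarize(text: "Ruby is an interpreted, high-level programming language...")
    #=> the "summary" field of the API response, as a String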
data/lib/llm/google_palm.rb CHANGED
@@ -81,5 +81,25 @@ module LLM
      response = client.generate_chat_message(**default_params)
      response.dig("candidates", 0, "content")
    end
+
+   #
+   # Generate a summarization for a given text
+   #
+   # @param text [String] The text to generate a summarization for
+   # @return [String] The summarization
+   #
+   def summarize(text:)
+     prompt_template = Prompt.load_from_path(
+       file_path: Langchain.root.join("llm/prompts/summarize_template.json")
+     )
+     prompt = prompt_template.format(text: text)
+
+     complete(
+       prompt: prompt,
+       temperature: DEFAULTS[:temperature],
+       # Most models have a context length of 2048 tokens (except for the newest models, which support 4096).
+       max_tokens: 2048
+     )
+   end
  end
end
data/lib/llm/hugging_face.rb CHANGED
@@ -12,6 +12,7 @@ module LLM

    #
    # Initialize the HuggingFace LLM
+   #
    # @param api_key [String] The API key to use
    #
    def initialize(api_key:)
@@ -21,9 +22,12 @@ module LLM
      @client = ::HuggingFace::InferenceApi.new(api_token: api_key)
    end

+   #
    # Generate an embedding for a given text
+   #
    # @param text [String] The text to embed
    # @return [Array] The embedding
+   #
    def embed(text:)
      client.embedding(
        input: text,
data/lib/llm/openai.rb CHANGED
@@ -18,9 +18,12 @@ module LLM
      @client = ::OpenAI::Client.new(access_token: api_key)
    end

+   #
    # Generate an embedding for a given text
+   #
    # @param text [String] The text to generate an embedding for
    # @return [Array] The embedding
+   #
    def embed(text:)
      response = client.embeddings(
        parameters: {
@@ -31,9 +34,12 @@ module LLM
      response.dig("data").first.dig("embedding")
    end

+   #
    # Generate a completion for a given prompt
+   #
    # @param prompt [String] The prompt to generate a completion for
    # @return [String] The completion
+   #
    def complete(prompt:, **params)
      default_params = {
        model: DEFAULTS[:completion_model_name],
@@ -51,9 +57,12 @@ module LLM
      response.dig("choices", 0, "text")
    end

+   #
    # Generate a chat completion for a given prompt
+   #
    # @param prompt [String] The prompt to generate a chat completion for
    # @return [String] The chat completion
+   #
    def chat(prompt:, **params)
      default_params = {
        model: DEFAULTS[:chat_completion_model_name],
@@ -71,5 +80,25 @@ module LLM
      response = client.chat(parameters: default_params)
      response.dig("choices", 0, "message", "content")
    end
+
+   #
+   # Generate a summary for a given text
+   #
+   # @param text [String] The text to generate a summary for
+   # @return [String] The summary
+   #
+   def summarize(text:)
+     prompt_template = Prompt.load_from_path(
+       file_path: Langchain.root.join("llm/prompts/summarize_template.json")
+     )
+     prompt = prompt_template.format(text: text)
+
+     complete(
+       prompt: prompt,
+       temperature: DEFAULTS[:temperature],
+       # Most models have a context length of 2048 tokens (except for the newest models, which support 4096).
+       max_tokens: 2048
+     )
+   end
  end
end
data/lib/llm/prompts/summarize_template.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "_type": "prompt",
+   "input_variables": ["text"],
+   "template": "Write a concise summary of the following:\n\n{text}\n\nCONCISE SUMMARY:"
+ }
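
This template backs the `summarize` implementations in the OpenAI, Google PaLM, and Replicate classes. A sketch of how it renders, following the `format` call those methods use (the input text is a placeholder):

    require "langchain"

    template = Prompt.load_from_path(
      file_path: Langchain.root.join("llm/prompts/summarize_template.json")
    )
    template.format(text: "Long article body...")
    #=> "Write a concise summary of the following:\n\nLong article body...\n\nCONCISE SUMMARY:"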
data/lib/llm/replicate.rb CHANGED
@@ -23,8 +23,11 @@ module LLM
      dimension: 384
    }.freeze

+   #
    # Initialize the Replicate LLM
+   #
    # @param api_key [String] The API key to use
+   #
    def initialize(api_key:)
      depends_on "replicate-ruby"
      require "replicate"
@@ -36,9 +39,12 @@ module LLM
      @client = ::Replicate.client
    end

+   #
    # Generate an embedding for a given text
+   #
    # @param text [String] The text to generate an embedding for
    # @return [Hash] The embedding
+   #
    def embed(text:)
      response = embeddings_model.predict(input: text)

@@ -50,9 +56,12 @@ module LLM
      response.output
    end

+   #
    # Generate a completion for a given prompt
+   #
    # @param prompt [String] The prompt to generate a completion for
    # @return [Hash] The completion
+   #
    def complete(prompt:, **params)
      response = completion_model.predict(prompt: prompt)

@@ -73,6 +82,26 @@ module LLM
      complete(...)
    end

+   #
+   # Generate a summary for a given text
+   #
+   # @param text [String] The text to generate a summary for
+   # @return [String] The summary
+   #
+   def summarize(text:)
+     prompt_template = Prompt.load_from_path(
+       file_path: Langchain.root.join("llm/prompts/summarize_template.json")
+     )
+     prompt = prompt_template.format(text: text)
+
+     complete(
+       prompt: prompt,
+       temperature: DEFAULTS[:temperature],
+       # Most models have a context length of 2048 tokens (except for the newest models, which support 4096).
+       max_tokens: 2048
+     )
+   end
+
    alias_method :generate_embedding, :embed

    private
data/lib/vectorsearch/base.rb CHANGED
@@ -74,7 +74,7 @@ module Vectorsearch

      texts = Array(path || paths)
        .flatten
-       .map { |path| Langchain::Loader.new(path)&.load }
+       .map { |path| Langchain::Loader.new(path)&.load&.value }
        .compact

      add_texts(texts: texts)
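
Because `Loader#load` now returns a `Langchain::Data` object instead of a raw string, `add_data` unwraps each result with `value` before indexing; the caller-facing interface stays the same, e.g. (a hypothetical vectorsearch client instance and placeholder paths):

    client.add_data(paths: ["docs/handbook.pdf", "docs/faq.html"])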
data/lib/vectorsearch/weaviate.rb CHANGED
@@ -14,9 +14,7 @@ module Vectorsearch

      @client = ::Weaviate::Client.new(
        url: url,
-       api_key: api_key,
-       model_service: llm,
-       model_service_api_key: llm_api_key
+       api_key: api_key
      )
      @index_name = index_name

@@ -30,7 +28,8 @@ module Vectorsearch
      objects = Array(texts).map do |text|
        {
          class: index_name,
-         properties: {content: text}
+         properties: {content: text},
+         vector: llm_client.embed(text: text)
        }
      end

@@ -43,11 +42,7 @@ module Vectorsearch
    def create_default_schema
      client.schema.create(
        class_name: index_name,
-       vectorizer: "text2vec-#{llm}",
-       # TODO: Figure out a way to optionally enable it
-       # "module_config": {
-       #   "qna-openai": {}
-       # },
+       vectorizer: "none",
        properties: [
          # TODO: Allow passing in your own IDs
          {
@@ -63,14 +58,9 @@ module Vectorsearch
    # @param k [Integer|String] The number of results to return
    # @return [Hash] The search results
    def similarity_search(query:, k: 4)
-     near_text = "{ concepts: [\"#{query}\"] }"
+     embedding = llm_client.embed(text: query)

-     client.query.get(
-       class_name: index_name,
-       near_text: near_text,
-       limit: k.to_s,
-       fields: "content _additional { id }"
-     )
+     similarity_search_by_vector(embedding: embedding, k: k)
    end

    # Return documents similar to the vector
@@ -92,29 +82,16 @@ module Vectorsearch
    # @param question [String] The question to ask
    # @return [Hash] The answer
    def ask(question:)
-     # Weaviate currently supports the `ask:` parameter only for the OpenAI LLM (with `qna-openai` module enabled).
-     # The Cohere support is on the way: https://github.com/weaviate/weaviate/pull/2600
-     if llm == :openai
-       ask_object = "{ question: \"#{question}\" }"
-
-       client.query.get(
-         class_name: index_name,
-         ask: ask_object,
-         limit: "1",
-         fields: "_additional { answer { result } }"
-       )
-     elsif llm == :cohere
-       search_results = similarity_search(query: question)
+     search_results = similarity_search(query: question)

-       context = search_results.map do |result|
-         result.dig("content").to_s
-       end
-       context = context.join("\n---\n")
+     context = search_results.map do |result|
+       result.dig("content").to_s
+     end
+     context = context.join("\n---\n")

-       prompt = generate_prompt(question: question, context: context)
+     prompt = generate_prompt(question: question, context: context)

-       llm_client.chat(prompt: prompt)
-     end
+     llm_client.chat(prompt: prompt)
    end
  end
end
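
The net effect of the Weaviate changes: embeddings are now computed client-side by the configured LLM and stored with the schema's vectorizer set to "none", and `ask` follows the same search-then-prompt path for every LLM instead of special-casing the qna-openai module. An end-to-end sketch, assuming the 0.3.15 constructor keywords implied by this file (URL, keys, and index name are placeholders):

    require "langchain"

    client = Vectorsearch::Weaviate.new(
      url: ENV["WEAVIATE_URL"],
      api_key: ENV["WEAVIATE_API_KEY"],
      index_name: "Recipes",
      llm: :openai,
      llm_api_key: ENV["OPENAI_API_KEY"]
    )

    client.create_default_schema

    # Each text is embedded via the LLM client and stored with its vector.
    client.add_texts(texts: ["Preheat the oven to 180C.", "Whisk four eggs with sugar."])

    # The query embedding is computed the same way, then matched by vector.
    client.similarity_search(query: "oven temperature", k: 1)
    client.ask(question: "How hot should the oven be?")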
data/lib/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module Langchain
-   VERSION = "0.3.13"
+   VERSION = "0.3.15"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: langchainrb
  version: !ruby/object:Gem::Version
-   version: 0.3.13
+   version: 0.3.15
  platform: ruby
  authors:
  - Andrei Bondarev
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2023-05-26 00:00:00.000000000 Z
+ date: 2023-05-30 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: dotenv-rails
@@ -182,14 +182,14 @@ dependencies:
    name: pgvector
    requirement: !ruby/object:Gem::Requirement
      requirements:
-     - - "<"
+     - - "~>"
        - !ruby/object:Gem::Version
          version: '0.2'
    type: :development
    prerelease: false
    version_requirements: !ruby/object:Gem::Requirement
      requirements:
-     - - "<"
+     - - "~>"
        - !ruby/object:Gem::Version
          version: '0.2'
  - !ruby/object:Gem::Dependency
@@ -290,7 +290,7 @@ dependencies:
      - - "~>"
        - !ruby/object:Gem::Version
          version: 1.17.0
- description: Build ML/AI-powered applications with Ruby's LangChain
+ description: Build LLM-backed Ruby applications with Ruby's LangChain
  email:
  - andrei.bondarev13@gmail.com
  executables: []
@@ -316,6 +316,7 @@ files:
  - lib/agent/chain_of_thought_agent/chain_of_thought_agent_prompt.json
  - lib/dependency_helper.rb
  - lib/langchain.rb
+ - lib/langchain/data.rb
  - lib/langchain/loader.rb
  - lib/langchain/processors/base.rb
  - lib/langchain/processors/csv.rb
@@ -331,6 +332,7 @@ files:
  - lib/llm/google_palm.rb
  - lib/llm/hugging_face.rb
  - lib/llm/openai.rb
+ - lib/llm/prompts/summarize_template.json
  - lib/llm/replicate.rb
  - lib/prompt/base.rb
  - lib/prompt/few_shot_prompt_template.rb
@@ -364,15 +366,15 @@ required_ruby_version: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
-       version: 2.6.0
+       version: 3.0.0
  required_rubygems_version: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
  requirements: []
- rubygems_version: 3.2.3
+ rubygems_version: 3.3.7
  signing_key:
  specification_version: 4
- summary: Build ML/AI-powered applications with Ruby's LangChain
+ summary: Build LLM-backed Ruby applications with Ruby's LangChain
  test_files: []