langchainrb 0.6.11 → 0.6.13

Files changed (53)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +8 -0
  3. data/README.md +5 -11
  4. data/lib/langchain/agent/base.rb +1 -0
  5. data/lib/langchain/agent/{react_agent/react_agent.rb → react_agent.rb} +12 -11
  6. data/lib/langchain/ai_message.rb +9 -0
  7. data/lib/langchain/conversation.rb +11 -11
  8. data/lib/langchain/conversation_memory.rb +3 -7
  9. data/lib/langchain/human_message.rb +9 -0
  10. data/lib/langchain/llm/cohere.rb +3 -2
  11. data/lib/langchain/llm/google_palm.rb +16 -11
  12. data/lib/langchain/llm/llama_cpp.rb +5 -5
  13. data/lib/langchain/llm/openai.rb +24 -25
  14. data/lib/langchain/llm/replicate.rb +2 -1
  15. data/lib/langchain/loader.rb +3 -2
  16. data/lib/langchain/message.rb +35 -0
  17. data/lib/langchain/output_parsers/base.rb +5 -4
  18. data/lib/langchain/output_parsers/{fix.rb → output_fixing_parser.rb} +3 -1
  19. data/lib/langchain/prompt/loading.rb +73 -67
  20. data/lib/langchain/prompt.rb +5 -0
  21. data/lib/langchain/system_message.rb +9 -0
  22. data/lib/langchain/tool/base.rb +14 -14
  23. data/lib/langchain/vectorsearch/chroma.rb +3 -2
  24. data/lib/langchain/vectorsearch/milvus.rb +4 -3
  25. data/lib/langchain/vectorsearch/pgvector.rb +10 -7
  26. data/lib/langchain/vectorsearch/pinecone.rb +18 -2
  27. data/lib/langchain/vectorsearch/qdrant.rb +4 -3
  28. data/lib/langchain/vectorsearch/weaviate.rb +3 -2
  29. data/lib/langchain/version.rb +1 -1
  30. data/lib/langchain.rb +19 -97
  31. metadata +49 -50
  32. data/.env.example +0 -21
  33. data/.rspec +0 -3
  34. data/.rubocop.yml +0 -11
  35. data/.tool-versions +0 -1
  36. data/Gemfile +0 -14
  37. data/Gemfile.lock +0 -360
  38. data/Rakefile +0 -17
  39. data/examples/conversation_with_openai.rb +0 -52
  40. data/examples/create_and_manage_few_shot_prompt_templates.rb +0 -36
  41. data/examples/create_and_manage_prompt_templates.rb +0 -25
  42. data/examples/create_and_manage_prompt_templates_using_structured_output_parser.rb +0 -116
  43. data/examples/llama_cpp.rb +0 -24
  44. data/examples/open_ai_function_calls.rb +0 -41
  45. data/examples/open_ai_qdrant_function_calls.rb +0 -39
  46. data/examples/pdf_store_and_query_with_chroma.rb +0 -40
  47. data/examples/store_and_query_with_pinecone.rb +0 -46
  48. data/examples/store_and_query_with_qdrant.rb +0 -37
  49. data/examples/store_and_query_with_weaviate.rb +0 -32
  50. data/lefthook.yml +0 -5
  51. data/sig/langchain.rbs +0 -4
  52. /data/lib/langchain/agent/{sql_query_agent/sql_query_agent.rb → sql_query_agent.rb} +0 -0
  53. /data/lib/langchain/output_parsers/{structured.rb → structured_output_parser.rb} +0 -0
data/lib/langchain/prompt/loading.rb CHANGED
@@ -11,82 +11,88 @@ module Langchain::Prompt
    "few_shot" => ->(config) { load_few_shot_prompt(config) }
  }

-  class << self
-    #
-    # Load prompt from file.
-    #
-    # @param file_path [String, Pathname] The path of the file to read the configuration data from.
-    #
-    # @return [Object] The loaded prompt loaded.
-    #
-    # @raise [ArgumentError] If the file type of the specified file path is not supported.
-    #
-    def load_from_path(file_path:)
-      file_path = file_path.is_a?(String) ? Pathname.new(file_path) : file_path
-
-      case file_path.extname
-      when ".json"
-        config = JSON.parse(File.read(file_path))
-      when ".yaml", ".yml"
-        config = YAML.safe_load(File.read(file_path))
-      else
-        raise ArgumentError, "Got unsupported file type #{file_path.extname}"
-      end
-
-      load_from_config(config)
+  module Loading
+    def self.included(base)
+      base.extend ClassMethods
    end

-    #
-    # Loads a prompt template with the given configuration.
-    #
-    # @param config [Hash] A hash containing the configuration for the prompt.
-    #
-    # @return [PromptTemplate] The loaded prompt loaded.
-    #
-    def load_prompt(config)
-      template, input_variables = config.values_at("template", "input_variables")
-      PromptTemplate.new(template: template, input_variables: input_variables)
-    end
+    module ClassMethods
+      #
+      # Load prompt from file.
+      #
+      # @param file_path [String, Pathname] The path of the file to read the configuration data from.
+      #
+      # @return [Object] The loaded prompt loaded.
+      #
+      # @raise [ArgumentError] If the file type of the specified file path is not supported.
+      #
+      def load_from_path(file_path:)
+        file_path = file_path.is_a?(String) ? Pathname.new(file_path) : file_path

-    #
-    # Loads a prompt template with the given configuration.
-    #
-    # @param config [Hash] A hash containing the configuration for the prompt.
-    #
-    # @return [FewShotPromptTemplate] The loaded prompt loaded.
-    #
-    def load_few_shot_prompt(config)
-      prefix, suffix, example_prompt, examples, input_variables = config.values_at("prefix", "suffix", "example_prompt", "examples", "input_variables")
-      example_prompt = load_prompt(example_prompt)
-      FewShotPromptTemplate.new(prefix: prefix, suffix: suffix, example_prompt: example_prompt, examples: examples, input_variables: input_variables)
-    end
+        case file_path.extname
+        when ".json"
+          config = JSON.parse(File.read(file_path))
+        when ".yaml", ".yml"
+          config = YAML.safe_load(File.read(file_path))
+        else
+          raise ArgumentError, "Got unsupported file type #{file_path.extname}"
+        end

-    private
+        load_from_config(config)
+      end

-    #
-    # Loads the prompt from the given configuration hash
-    #
-    # @param config [Hash] the configuration hash to load from
-    #
-    # @return [Object] the loaded prompt
-    #
-    # @raise [ArgumentError] if the prompt type specified in the config is not supported
-    #
-    def load_from_config(config)
-      # If `_type` key is not present in the configuration hash, add it with a default value of `prompt`
-      unless config.key?("_type")
-        Langchain.logger.warn "No `_type` key found, defaulting to `prompt`"
-        config["_type"] = "prompt"
+      #
+      # Loads a prompt template with the given configuration.
+      #
+      # @param config [Hash] A hash containing the configuration for the prompt.
+      #
+      # @return [PromptTemplate] The loaded prompt loaded.
+      #
+      def load_prompt(config)
+        template, input_variables = config.values_at("template", "input_variables")
+        PromptTemplate.new(template: template, input_variables: input_variables)
      end

-      # If the prompt type specified in the configuration hash is not supported, raise an exception
-      unless TYPE_TO_LOADER.key?(config["_type"])
-        raise ArgumentError, "Loading #{config["_type"]} prompt not supported"
+      #
+      # Loads a prompt template with the given configuration.
+      #
+      # @param config [Hash] A hash containing the configuration for the prompt.
+      #
+      # @return [FewShotPromptTemplate] The loaded prompt loaded.
+      #
+      def load_few_shot_prompt(config)
+        prefix, suffix, example_prompt, examples, input_variables = config.values_at("prefix", "suffix", "example_prompt", "examples", "input_variables")
+        example_prompt = load_prompt(example_prompt)
+        FewShotPromptTemplate.new(prefix: prefix, suffix: suffix, example_prompt: example_prompt, examples: examples, input_variables: input_variables)
      end

-      # Load the prompt using the corresponding loader function from the `TYPE_TO_LOADER` hash
-      prompt_loader = TYPE_TO_LOADER[config["_type"]]
-      prompt_loader.call(config)
+      private
+
+      #
+      # Loads the prompt from the given configuration hash
+      #
+      # @param config [Hash] the configuration hash to load from
+      #
+      # @return [Object] the loaded prompt
+      #
+      # @raise [ArgumentError] if the prompt type specified in the config is not supported
+      #
+      def load_from_config(config)
+        # If `_type` key is not present in the configuration hash, add it with a default value of `prompt`
+        unless config.key?("_type")
+          Langchain.logger.warn "No `_type` key found, defaulting to `prompt`"
+          config["_type"] = "prompt"
+        end
+
+        # If the prompt type specified in the configuration hash is not supported, raise an exception
+        unless TYPE_TO_LOADER.key?(config["_type"])
+          raise ArgumentError, "Loading #{config["_type"]} prompt not supported"
+        end
+
+        # Load the prompt using the corresponding loader function from the `TYPE_TO_LOADER` hash
+        prompt_loader = TYPE_TO_LOADER[config["_type"]]
+        prompt_loader.call(config)
+      end
    end
  end
end
data/lib/langchain/prompt.rb ADDED
@@ -0,0 +1,5 @@
+module Langchain
+  module Prompt
+    include Loading
+  end
+end
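The relocated `Loading` module is mixed into `Langchain::Prompt` here, and its `included` hook extends `ClassMethods`, so the loader stays callable as `Langchain::Prompt.load_from_path`. A minimal sketch (the file name and prompt text are made up; the JSON keys mirror what `load_prompt` reads via `config.values_at`, and `PromptTemplate#format` is assumed from the existing prompt API):

  require "json"
  require "langchain"

  # Hypothetical prompt config; a missing "_type" key defaults to "prompt".
  File.write("joke_prompt.json", {
    "_type" => "prompt",
    "template" => "Tell me a {adjective} joke about {content}.",
    "input_variables" => ["adjective", "content"]
  }.to_json)

  prompt = Langchain::Prompt.load_from_path(file_path: "joke_prompt.json")
  prompt.format(adjective: "funny", content: "chickens")
  # => "Tell me a funny joke about chickens."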
data/lib/langchain/system_message.rb ADDED
@@ -0,0 +1,9 @@
+# frozen_string_literal: true
+
+module Langchain
+  class SystemMessage < Message
+    def type
+      "system"
+    end
+  end
+end
data/lib/langchain/tool/base.rb CHANGED
@@ -7,16 +7,18 @@ module Langchain::Tool
  #
  # == Available Tools
  #
-  # - {Langchain::Tool::Calculator}: Calculate the result of a math expression
-  # - {Langchain::Tool::RubyCodeInterpretor}: Runs ruby code
+  # - {Langchain::Tool::Calculator}: calculate the result of a math expression
+  # - {Langchain::Tool::Database}: executes SQL queries
  # - {Langchain::Tool::GoogleSearch}: search on Google (via SerpAPI)
+  # - {Langchain::Tool::RubyCodeInterpreter}: runs ruby code
+  # - {Langchain::Tool::Weather}: gets current weather data
  # - {Langchain::Tool::Wikipedia}: search on Wikipedia
  #
  # == Usage
  #
  # 1. Pick the tools you'd like to pass to an Agent and install the gems listed under **Gem Requirements**
  #
-  #    # To use all 3 tools:
+  #    # For example to use the Calculator, GoogleSearch, and Wikipedia:
  #    gem install eqn
  #    gem install google_search_results
  #    gem install wikipedia-client
@@ -28,16 +30,14 @@ module Langchain::Tool
  # 3. Pass the tools when Agent is instantiated.
  #
  #    agent = Langchain::Agent::ReActAgent.new(
-  #      llm: :openai, # or :cohere, :hugging_face, :google_palm or :replicate
-  #      llm_api_key: ENV["OPENAI_API_KEY"],
-  #      tools: ["google_search", "calculator", "wikipedia"]
+  #      llm: Langchain::LLM::OpenAI.new(api_key: "YOUR_API_KEY"), # or other like Cohere, Hugging Face, Google Palm or Replicate
+  #      tools: [
+  #        Langchain::Tool::GoogleSearch.new(api_key: "YOUR_API_KEY"),
+  #        Langchain::Tool::Calculator.new,
+  #        Langchain::Tool::Wikipedia.new
+  #      ]
  #    )
  #
-  # 4. Confirm that the Agent is using the Tools you passed in:
-  #
-  #    agent.tools
-  #    # => ["google_search", "calculator", "wikipedia"]
-  #
  # == Adding Tools
  #
  # 1. Create a new file in lib/langchain/tool/your_tool_name.rb
@@ -53,7 +53,7 @@ module Langchain::Tool
    #
    # @return [String] tool name
    #
-    def tool_name
+    def name
      self.class.const_get(:NAME)
    end

@@ -68,7 +68,7 @@ module Langchain::Tool
    #
    # @return [String] tool description
    #
-    def tool_description
+    def description
      self.class.const_get(:DESCRIPTION)
    end

@@ -109,7 +109,7 @@ module Langchain::Tool
    #
    def self.validate_tools!(tools:)
      # Check if the tool count is equal to unique tool count
-      if tools.count != tools.map(&:tool_name).uniq.count
+      if tools.count != tools.map(&:name).uniq.count
        raise ArgumentError, "Either tools are not unique or are conflicting with each other"
      end
    end
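With the accessors renamed from `tool_name`/`tool_description` to `name`/`description`, custom tools keep declaring `NAME` and `DESCRIPTION` constants; only the readers change. A rough sketch, where the tool class and its body are invented for illustration and `execute(input:)` is assumed to be the existing tool contract:

  require "langchain"

  module Langchain::Tool
    class RandomNumber < Base
      NAME = "random_number"
      DESCRIPTION = "Returns a random integer between 0 and the given input"

      # Assumed interface: Agents call #execute with the LLM-chosen input.
      def execute(input:)
        rand(0..input.to_i)
      end
    end
  end

  tool = Langchain::Tool::RandomNumber.new
  tool.name         # => "random_number"  (previously #tool_name)
  tool.description  # => "Returns a random integer between 0 and the given input"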
data/lib/langchain/vectorsearch/chroma.rb CHANGED
@@ -113,10 +113,11 @@ module Langchain::Vectorsearch

    # Ask a question and return the answer
    # @param question [String] The question to ask
+    # @param k [Integer] The number of results to have in context
    # @yield [String] Stream responses back one String at a time
    # @return [String] The answer to the question
-    def ask(question:, &block)
-      search_results = similarity_search(query: question)
+    def ask(question:, k: 4, &block)
+      search_results = similarity_search(query: question, k: k)

      context = search_results.map do |result|
        result.document
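The vector search wrappers in this release all gain the same `k:` keyword on `#ask`, which caps how many similarity-search hits are folded into the prompt context (default 4). A sketch, assuming an already-configured client in `chroma`:

  # k: 2 keeps only the two closest chunks in the prompt context.
  answer = chroma.ask(question: "What does the handbook say about time off?", k: 2)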
data/lib/langchain/vectorsearch/milvus.rb CHANGED
@@ -5,7 +5,7 @@ module Langchain::Vectorsearch
    #
    # Wrapper around Milvus REST APIs.
    #
-    # Gem requirements: gem "milvus", "~> 0.9.0"
+    # Gem requirements: gem "milvus", "~> 0.9.2"
    #
    # Usage:
    # milvus = Langchain::Vectorsearch::Milvus.new(url:, index_name:, llm:, api_key:)
@@ -138,10 +138,11 @@ module Langchain::Vectorsearch

    # Ask a question and return the answer
    # @param question [String] The question to ask
+    # @param k [Integer] The number of results to have in context
    # @yield [String] Stream responses back one String at a time
    # @return [String] The answer to the question
-    def ask(question:, &block)
-      search_results = similarity_search(query: question)
+    def ask(question:, k: 4, &block)
+      search_results = similarity_search(query: question, k: k)

      content_field = search_results.dig("results", "fields_data").select { |field| field.dig("field_name") == "content" }
      content_data = content_field.first.dig("Field", "Scalars", "Data", "StringData", "data")
data/lib/langchain/vectorsearch/pgvector.rb CHANGED
@@ -8,7 +8,7 @@ module Langchain::Vectorsearch
    # Gem requirements: gem "pgvector", "~> 0.2"
    #
    # Usage:
-    # pgvector = Langchain::Vectorsearch::Pgvector.new(url:, index_name:, llm:, namespace_column: nil, namespace: nil)
+    # pgvector = Langchain::Vectorsearch::Pgvector.new(url:, index_name:, llm:, namespace: nil)
    #

    # The operators supported by the PostgreSQL vector search adapter
@@ -90,20 +90,22 @@ module Langchain::Vectorsearch
    end

    # Create default schema
-    # @return [PG::Result] The response from the database
    def create_default_schema
      db.run "CREATE EXTENSION IF NOT EXISTS vector"
-      namespace = namespace_column
+      namespace_column = @namespace_column
      vector_dimension = default_dimension
      db.create_table? table_name.to_sym do
        primary_key :id
        text :content
        column :vectors, "vector(#{vector_dimension})"
-        text namespace.to_sym, default: nil
+        text namespace_column.to_sym, default: nil
      end
    end

-    # TODO: Add destroy_default_schema method
+    # Destroy default schema
+    def destroy_default_schema
+      db.drop_table? table_name.to_sym
+    end

    # Search for similar texts in the index
    # @param query [String] The text to search for
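The new `destroy_default_schema` is the counterpart of `create_default_schema`, which is handy for tests and teardown. A sketch with placeholder connection details, using the constructor keywords from the usage comment above:

  pgvector = Langchain::Vectorsearch::Pgvector.new(
    url: ENV["POSTGRES_URL"],    # placeholder connection string
    index_name: "documents",     # used as the table name
    llm: Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
  )

  pgvector.create_default_schema   # enables the vector extension and creates the table
  # ... add_texts, similarity_search, ask ...
  pgvector.destroy_default_schema  # drops the table again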
@@ -133,10 +135,11 @@ module Langchain::Vectorsearch

    # Ask a question and return the answer
    # @param question [String] The question to ask
+    # @param k [Integer] The number of results to have in context
    # @yield [String] Stream responses back one String at a time
    # @return [String] The answer to the question
-    def ask(question:, &block)
-      search_results = similarity_search(query: question)
+    def ask(question:, k: 4, &block)
+      search_results = similarity_search(query: question, k: k)

      context = search_results.map do |result|
        result.content.to_s
data/lib/langchain/vectorsearch/pinecone.rb CHANGED
@@ -51,6 +51,21 @@ module Langchain::Vectorsearch
      index.upsert(vectors: vectors, namespace: namespace)
    end

+    def add_data(paths:, namespace: "")
+      raise ArgumentError, "Paths must be provided" if Array(paths).empty?
+
+      texts = Array(paths)
+        .flatten
+        .map do |path|
+          data = Langchain::Loader.new(path)&.load&.chunks
+          data.map { |chunk| chunk[:text] }
+        end
+
+      texts.flatten!
+
+      add_texts(texts: texts, namespace: namespace)
+    end
+
    # Update a list of texts in the index
    # @param texts [Array] The list of texts to update
    # @param ids [Array] The list of IDs to update
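`add_data` is new in this release: it runs each path through `Langchain::Loader`, collects the chunk texts, and hands them to `add_texts`. A sketch with made-up paths, assuming `pinecone` is an already-configured `Langchain::Vectorsearch::Pinecone` instance:

  pinecone.add_data(
    paths: ["docs/handbook.pdf", "docs/faq.md"],   # hypothetical local files
    namespace: "company-docs"
  )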
@@ -138,11 +153,12 @@ module Langchain::Vectorsearch
    # Ask a question and return the answer
    # @param question [String] The question to ask
    # @param namespace [String] The namespace to search in
+    # @param k [Integer] The number of results to have in context
    # @param filter [String] The filter to use
    # @yield [String] Stream responses back one String at a time
    # @return [String] The answer to the question
-    def ask(question:, namespace: "", filter: nil, &block)
-      search_results = similarity_search(query: question, namespace: namespace, filter: filter)
+    def ask(question:, namespace: "", filter: nil, k: 4, &block)
+      search_results = similarity_search(query: question, namespace: namespace, filter: filter, k: k)

      context = search_results.map do |result|
        result.dig("metadata").to_s
data/lib/langchain/vectorsearch/qdrant.rb CHANGED
@@ -5,7 +5,7 @@ module Langchain::Vectorsearch
    #
    # Wrapper around Qdrant
    #
-    # Gem requirements: gem "qdrant-ruby", "~> 0.9.0"
+    # Gem requirements: gem "qdrant-ruby", "~> 0.9.3"
    #
    # Usage:
    # qdrant = Langchain::Vectorsearch::Qdrant.new(url:, api_key:, index_name:, llm:, llm_api_key:)
@@ -112,10 +112,11 @@ module Langchain::Vectorsearch

    # Ask a question and return the answer
    # @param question [String] The question to ask
+    # @param k [Integer] The number of results to have in context
    # @yield [String] Stream responses back one String at a time
    # @return [String] The answer to the question
-    def ask(question:, &block)
-      search_results = similarity_search(query: question)
+    def ask(question:, k: 4, &block)
+      search_results = similarity_search(query: question, k: k)

      context = search_results.map do |result|
        result.dig("payload").to_s
data/lib/langchain/vectorsearch/weaviate.rb CHANGED
@@ -124,10 +124,11 @@ module Langchain::Vectorsearch

    # Ask a question and return the answer
    # @param question [String] The question to ask
+    # @param k [Integer] The number of results to have in context
    # @yield [String] Stream responses back one String at a time
    # @return [Hash] The answer
-    def ask(question:, &block)
-      search_results = similarity_search(query: question)
+    def ask(question:, k: 4, &block)
+      search_results = similarity_search(query: question, k: k)

      context = search_results.map do |result|
        result.dig("content").to_s
data/lib/langchain/version.rb CHANGED
@@ -1,5 +1,5 @@
# frozen_string_literal: true

module Langchain
-  VERSION = "0.6.11"
+  VERSION = "0.6.13"
end
data/lib/langchain.rb CHANGED
@@ -3,8 +3,25 @@
require "logger"
require "pathname"
require "colorize"
-
-require_relative "./langchain/version"
+require "zeitwerk"
+loader = Zeitwerk::Loader.for_gem
+loader.ignore("#{__dir__}/langchainrb.rb")
+loader.inflector.inflect(
+  "ai_message" => "AIMessage",
+  "ai21" => "AI21",
+  "ai21_validator" => "AI21Validator",
+  "csv" => "CSV",
+  "html" => "HTML",
+  "json" => "JSON",
+  "jsonl" => "JSONL",
+  "llm" => "LLM",
+  "openai" => "OpenAI",
+  "openai_validator" => "OpenAIValidator",
+  "pdf" => "PDF",
+  "react_agent" => "ReActAgent",
+  "sql_query_agent" => "SQLQueryAgent"
+)
+loader.setup

# Langchain.rb a is library for building LLM-backed Ruby applications. It is an abstraction layer that sits on top of the emerging AI-related tools that makes it easy for developers to consume and string those services together.
#
@@ -48,13 +65,6 @@ require_relative "./langchain/version"
#
#   Langchain.logger.level = :info
module Langchain
-  autoload :Loader, "langchain/loader"
-  autoload :Data, "langchain/data"
-  autoload :Conversation, "langchain/conversation"
-  autoload :ConversationMemory, "langchain/conversation_memory"
-  autoload :DependencyHelper, "langchain/dependency_helper"
-  autoload :ContextualLogger, "langchain/contextual_logger"
-
  class << self
    # @return [ContextualLogger]
    attr_reader :logger
@@ -73,95 +83,7 @@ module Langchain

  @root = Pathname.new(__dir__)

-  module Agent
-    autoload :Base, "langchain/agent/base"
-    autoload :ReActAgent, "langchain/agent/react_agent/react_agent.rb"
-    autoload :SQLQueryAgent, "langchain/agent/sql_query_agent/sql_query_agent.rb"
-  end
-
-  module Chunker
-    autoload :Base, "langchain/chunker/base"
-    autoload :Text, "langchain/chunker/text"
-    autoload :RecursiveText, "langchain/chunker/recursive_text"
-  end
-
-  module Tool
-    autoload :Base, "langchain/tool/base"
-    autoload :Calculator, "langchain/tool/calculator"
-    autoload :RubyCodeInterpreter, "langchain/tool/ruby_code_interpreter"
-    autoload :GoogleSearch, "langchain/tool/google_search"
-    autoload :Weather, "langchain/tool/weather"
-    autoload :Wikipedia, "langchain/tool/wikipedia"
-    autoload :Database, "langchain/tool/database"
-  end
-
-  module Processors
-    autoload :Base, "langchain/processors/base"
-    autoload :CSV, "langchain/processors/csv"
-    autoload :Docx, "langchain/processors/docx"
-    autoload :HTML, "langchain/processors/html"
-    autoload :JSON, "langchain/processors/json"
-    autoload :JSONL, "langchain/processors/jsonl"
-    autoload :PDF, "langchain/processors/pdf"
-    autoload :Text, "langchain/processors/text"
-    autoload :Xlsx, "langchain/processors/xlsx"
-  end
-
-  module Utils
-    module TokenLength
-      autoload :BaseValidator, "langchain/utils/token_length/base_validator"
-      autoload :AI21Validator, "langchain/utils/token_length/ai21_validator"
-      autoload :CohereValidator, "langchain/utils/token_length/cohere_validator"
-      autoload :GooglePalmValidator, "langchain/utils/token_length/google_palm_validator"
-      autoload :OpenAIValidator, "langchain/utils/token_length/openai_validator"
-      autoload :TokenLimitExceeded, "langchain/utils/token_length/token_limit_exceeded"
-    end
-  end
-
-  module Vectorsearch
-    autoload :Base, "langchain/vectorsearch/base"
-    autoload :Chroma, "langchain/vectorsearch/chroma"
-    autoload :Hnswlib, "langchain/vectorsearch/hnswlib"
-    autoload :Milvus, "langchain/vectorsearch/milvus"
-    autoload :Pinecone, "langchain/vectorsearch/pinecone"
-    autoload :Pgvector, "langchain/vectorsearch/pgvector"
-    autoload :Qdrant, "langchain/vectorsearch/qdrant"
-    autoload :Weaviate, "langchain/vectorsearch/weaviate"
-  end
-
-  module LLM
-    autoload :AI21, "langchain/llm/ai21"
-    autoload :Anthropic, "langchain/llm/anthropic"
-    autoload :Base, "langchain/llm/base"
-    autoload :Cohere, "langchain/llm/cohere"
-    autoload :GooglePalm, "langchain/llm/google_palm"
-    autoload :HuggingFace, "langchain/llm/hugging_face"
-    autoload :LlamaCpp, "langchain/llm/llama_cpp"
-    autoload :OpenAI, "langchain/llm/openai"
-    autoload :Replicate, "langchain/llm/replicate"
-  end
-
-  module Prompt
-    require_relative "langchain/prompt/loading"
-
-    autoload :Base, "langchain/prompt/base"
-    autoload :PromptTemplate, "langchain/prompt/prompt_template"
-    autoload :FewShotPromptTemplate, "langchain/prompt/few_shot_prompt_template"
-  end
-
-  module ActiveRecord
-    autoload :Hooks, "langchain/active_record/hooks"
-  end
-
-  module OutputParsers
-    autoload :Base, "langchain/output_parsers/base"
-    autoload :StructuredOutputParser, "langchain/output_parsers/structured"
-    autoload :OutputFixingParser, "langchain/output_parsers/fix"
-  end
-
  module Errors
    class BaseError < StandardError; end
  end
end
-
-require "langchain/railtie" if defined?(Rails)