langchainrb 0.6.10 → 0.6.12

Files changed (50)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +4 -0
  3. data/README.md +5 -7
  4. data/lib/langchain/agent/base.rb +1 -0
  5. data/lib/langchain/agent/{react_agent/react_agent.rb → react_agent.rb} +12 -11
  6. data/lib/langchain/ai_message.rb +9 -0
  7. data/lib/langchain/conversation.rb +11 -11
  8. data/lib/langchain/conversation_memory.rb +3 -7
  9. data/lib/langchain/human_message.rb +9 -0
  10. data/lib/langchain/llm/anthropic.rb +3 -2
  11. data/lib/langchain/llm/cohere.rb +2 -1
  12. data/lib/langchain/llm/google_palm.rb +15 -10
  13. data/lib/langchain/llm/llama_cpp.rb +5 -5
  14. data/lib/langchain/llm/openai.rb +24 -25
  15. data/lib/langchain/llm/replicate.rb +2 -1
  16. data/lib/langchain/loader.rb +2 -2
  17. data/lib/langchain/message.rb +35 -0
  18. data/lib/langchain/output_parsers/base.rb +5 -4
  19. data/lib/langchain/output_parsers/{fix.rb → output_fixing_parser.rb} +3 -1
  20. data/lib/langchain/prompt/loading.rb +73 -67
  21. data/lib/langchain/prompt.rb +5 -0
  22. data/lib/langchain/system_message.rb +9 -0
  23. data/lib/langchain/tool/base.rb +14 -14
  24. data/lib/langchain/vectorsearch/milvus.rb +46 -5
  25. data/lib/langchain/vectorsearch/pgvector.rb +7 -5
  26. data/lib/langchain/version.rb +1 -1
  27. data/lib/langchain.rb +19 -97
  28. metadata +37 -38
  29. data/.env.example +0 -21
  30. data/.rspec +0 -3
  31. data/.rubocop.yml +0 -11
  32. data/.tool-versions +0 -1
  33. data/Gemfile +0 -14
  34. data/Gemfile.lock +0 -360
  35. data/Rakefile +0 -17
  36. data/examples/conversation_with_openai.rb +0 -52
  37. data/examples/create_and_manage_few_shot_prompt_templates.rb +0 -36
  38. data/examples/create_and_manage_prompt_templates.rb +0 -25
  39. data/examples/create_and_manage_prompt_templates_using_structured_output_parser.rb +0 -116
  40. data/examples/llama_cpp.rb +0 -24
  41. data/examples/open_ai_function_calls.rb +0 -41
  42. data/examples/open_ai_qdrant_function_calls.rb +0 -43
  43. data/examples/pdf_store_and_query_with_chroma.rb +0 -40
  44. data/examples/store_and_query_with_pinecone.rb +0 -46
  45. data/examples/store_and_query_with_qdrant.rb +0 -37
  46. data/examples/store_and_query_with_weaviate.rb +0 -32
  47. data/lefthook.yml +0 -5
  48. data/sig/langchain.rbs +0 -4
  49. /data/lib/langchain/agent/{sql_query_agent/sql_query_agent.rb → sql_query_agent.rb} +0 -0
  50. /data/lib/langchain/output_parsers/{structured.rb → structured_output_parser.rb} +0 -0
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: ba1fb0e3fbc05e4279fe3a698ad8fb1a25e02788991a8e6a7b27b411771096f3
- data.tar.gz: 309cda1c8c7a4982b22c6ad2f82c20fb12ca3bdfdc3e8c0ebeaa9687a7f71ce0
+ metadata.gz: d4567e8a572ad802d06af2571fd5be51a0376a7507f74474eb0598198b5cc29a
+ data.tar.gz: 1032626da89febc17cbf76db2f37a17fbfe53398291cd8fa6e3726d621b9f429
  SHA512:
- metadata.gz: 3457cbad7efbc5504f4cb3b684e3837984be5d486c1ee21a718508d606dd63ccc2223dffc55f3cc9c52f5fd0a533b364407bbb4c3208515e2bab8ca2af9ea60a
- data.tar.gz: 1ff3cded239c286ee87d7a2f0a1cfa45c734b185e35b6e6fbc63fd2951f7c7397e8562fa145a16f545ef07b2e983f40715010e9c96653b5f2bf128325a5a7577
+ metadata.gz: 1119378271d50091473d3999c16c78479da6fe7609e1583d3a2f964e7edc1f4802ee98646e4a22d784cd600cdbfdff2da9313ca492f77d2f2743a6ae6082f9bd
+ data.tar.gz: c85e6f0b76bb982546531cc9a2403c4bb9de60b9270078739c1591a563ab6e5404bb94177cef7281e9cbfdfb2938386f3bbdfdd182d202d3bedabef2dfd1383c
data/CHANGELOG.md CHANGED
@@ -1,5 +1,9 @@
  ## [Unreleased]

+ ## [0.6.12] - 2023-08-13
+
+ ## [0.6.11] - 2023-08-08
+
  ## [0.6.10] - 2023-08-01
  - 🗣️ LLMs
    - Introducing Anthropic support
data/README.md CHANGED
@@ -6,7 +6,7 @@

  :warning: UNDER ACTIVE AND RAPID DEVELOPMENT (MAY BE BUGGY AND UNTESTED)

- ![Tests status](https://github.com/andreibondarev/langchainrb/actions/workflows/ci.yml/badge.svg)
+ ![Tests status](https://github.com/andreibondarev/langchainrb/actions/workflows/ci.yml/badge.svg?branch=main)
  [![Gem Version](https://badge.fury.io/rb/langchainrb.svg)](https://badge.fury.io/rb/langchainrb)
  [![Docs](http://img.shields.io/badge/yard-docs-blue.svg)](http://rubydoc.info/gems/langchainrb)
  [![License](https://img.shields.io/badge/license-MIT-green.svg)](https://github.com/andreibondarev/langchainrb/blob/main/LICENSE.txt)
@@ -161,11 +161,8 @@ qdrant:

  ```ruby
  client.llm.functions = functions
- client.llm.complete_response = true
  ```

- `complete_response` will return the entire choices data from the gpt response
-
  #### Cohere
  Add `gem "cohere-ruby", "~> 0.9.3"` to your Gemfile.

@@ -209,6 +206,10 @@ Add `gem "anthropic", "~> 0.1.0"` to your Gemfile.
  anthropic = Langchain::LLM::Anthropic.new(api_key: ENV["ANTHROPIC_API_KEY"])
  ```

+ ```ruby
+ anthropic.complete(prompt: "What is the meaning of life?")
+ ```
+
  ### Using Prompts 📋

  #### Prompt Templates
@@ -418,9 +419,6 @@ agent = Langchain::Agent::ReActAgent.new(
  llm: openai,
  tools: [search_tool, calculator]
  )
-
- agent.tools
- # => ["google_search", "calculator"]
  ```
  ```ruby
  agent.run(question: "How many full soccer fields would be needed to cover the distance between NYC and DC in a straight line?")
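
Note on the removed `complete_response` option: OpenAI chat calls now return a `Langchain::AIMessage` instead of either a raw string or the full choices hash (see the `data/lib/langchain/llm/openai.rb` diff below). A minimal sketch of the new call site, assuming `client.llm` is a `Langchain::LLM::OpenAI` instance:

```ruby
client.llm.functions = functions

message = client.llm.chat(prompt: "What is the weather like in Boston?")
message.content           # the text reply ("" when the model opts to call a function)
message.additional_kwargs # => {function_call: {...}} when a function call was made
```
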
data/lib/langchain/agent/base.rb CHANGED
@@ -7,6 +7,7 @@ module Langchain::Agent
  #
  # Available:
  # - {Langchain::Agent::ReActAgent}
+ # - {Langchain::Agent::SQLQueryAgent}
  #
  # @abstract
  class Base
data/lib/langchain/agent/{react_agent/react_agent.rb → react_agent.rb} RENAMED
@@ -7,12 +7,13 @@ module Langchain::Agent
  #
  # agent = Langchain::Agent::ReActAgent.new(
  #   llm: llm,
- #   tools: ["google_search", "calculator", "wikipedia"]
+ #   tools: [
+ #     Langchain::Tool::GoogleSearch.new(api_key: "YOUR_API_KEY"),
+ #     Langchain::Tool::Calculator.new,
+ #     Langchain::Tool::Wikipedia.new
+ #   ]
  # )
  #
- # agent.tools
- # # => ["google_search", "calculator", "wikipedia"]
- #
  # agent.run(question: "How many full soccer fields would be needed to cover the distance between NYC and DC in a straight line?")
  # #=> "Approximately 2,945 soccer fields would be needed to cover the distance between NYC and DC in a straight line."
  class ReActAgent < Base
@@ -21,7 +22,7 @@ module Langchain::Agent
  # Initializes the Agent
  #
  # @param llm [Object] The LLM client to use
- # @param tools [Array] The tools to use
+ # @param tools [Array<Tool>] The tools to use
  # @param max_iterations [Integer] The maximum number of iterations to run
  # @return [ReActAgent] The Agent::ReActAgent instance
  def initialize(llm:, tools: [], max_iterations: 10)
@@ -35,8 +36,8 @@ module Langchain::Agent

  # Validate tools when they're re-assigned
  #
- # @param value [Array] The tools to use
- # @return [Array] The tools that will be used
+ # @param value [Array<Tool>] The tools to use
+ # @return [Array<Tool>] The tools that will be used
  def tools=(value)
    Langchain::Tool::Base.validate_tools!(tools: value)
    @tools = value
@@ -70,7 +71,7 @@ module Langchain::Agent
  action_input = response.match(/Action Input: "?(.*)"?/)&.send(:[], -1)

  # Find the Tool and call `execute` with action_input as the input
- tool = tools.find { |tool| tool.tool_name == action.strip }
+ tool = tools.find { |tool| tool.name == action.strip }
  Langchain.logger.info("Invoking \"#{tool.class}\" Tool with \"#{action_input}\"", for: self.class)

  # Call `execute` with action_input as the input
@@ -99,15 +100,15 @@ module Langchain::Agent
  # @param tools [Array] Tools to use
  # @return [String] Prompt
  def create_prompt(question:, tools:)
-   tool_list = tools.map(&:tool_name)
+   tool_list = tools.map(&:name)

    prompt_template.format(
      date: Date.today.strftime("%B %d, %Y"),
      question: question,
      tool_names: "[#{tool_list.join(", ")}]",
      tools: tools.map do |tool|
-       tool_name = tool.tool_name
-       tool_description = tool.tool_description
+       tool_name = tool.name
+       tool_description = tool.description
        "#{tool_name}: #{tool_description}"
      end.join("\n")
    )
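
The tool interface renames `tool_name`/`tool_description` to `name`/`description` (see `data/lib/langchain/tool/base.rb` in the files list). A sketch of the lookup the agent now performs, with illustrative values:

```ruby
calculator = Langchain::Tool::Calculator.new
calculator.name        # e.g. "calculator"
calculator.description # the blurb interpolated into the ReAct prompt

# ReActAgent#run resolves the model's chosen action like so:
tools = [calculator]
tool = tools.find { |t| t.name == "calculator" }
tool.execute(input: "2 + 2")
```
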
data/lib/langchain/ai_message.rb ADDED
@@ -0,0 +1,9 @@
+ # frozen_string_literal: true
+
+ module Langchain
+   class AIMessage < Message
+     def type
+       "ai"
+     end
+   end
+ end
data/lib/langchain/conversation.rb CHANGED
@@ -39,45 +39,45 @@ module Langchain

  def set_functions(functions)
    @llm.functions = functions
-   @llm.complete_response = true
  end

  # Set the context of the conversation. Usually used to set the model's persona.
  # @param message [String] The context of the conversation
  def set_context(message)
-   @memory.set_context message
+   @memory.set_context SystemMessage.new(message)
  end

  # Add examples to the conversation. Used to give the model a sense of the conversation.
- # @param examples [Array<Hash>] The examples to add to the conversation
+ # @param examples [Array<AIMessage|HumanMessage>] The examples to add to the conversation
  def add_examples(examples)
    @memory.add_examples examples
  end

  # Message the model with a prompt and return the response.
  # @param message [String] The prompt to message the model with
- # @return [String] The response from the model
+ # @return [AIMessage] The response from the model
  def message(message)
-   @memory.append_user_message(message)
-   response = llm_response(message)
-   @memory.append_ai_message(response)
-   response
+   human_message = HumanMessage.new(message)
+   @memory.append_message(human_message)
+   ai_message = llm_response(human_message)
+   @memory.append_message(ai_message)
+   ai_message
  end

  # Messages from conversation memory
- # @return [Array<Hash>] The messages from the conversation memory
+ # @return [Array<AIMessage|HumanMessage>] The messages from the conversation memory
  def messages
    @memory.messages
  end

  # Context from conversation memory
- # @return [String] Context from conversation memory
+ # @return [SystemMessage] Context from conversation memory
  def context
    @memory.context
  end

  # Examples from conversation memory
- # @return [Array<Hash>] Examples from the conversation memory
+ # @return [Array<AIMessage|HumanMessage>] Examples from the conversation memory
  def examples
    @memory.examples
  end
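
Taken together, `Conversation` now round-trips message objects rather than strings. A minimal usage sketch, assuming a configured LLM client:

```ruby
llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
chat = Langchain::Conversation.new(llm: llm)

chat.set_context("You are a poet")    # wrapped in a SystemMessage internally
reply = chat.message("Write a haiku") # => Langchain::AIMessage
reply.content                         # the model's text
chat.messages                         # => [HumanMessage, AIMessage]
```
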
data/lib/langchain/conversation_memory.rb CHANGED
@@ -25,12 +25,8 @@ module Langchain
    @examples.concat examples
  end

- def append_ai_message(message)
-   @messages << {role: "ai", content: message}
- end
-
- def append_user_message(message)
-   @messages << {role: "user", content: message}
+ def append_message(message)
+   @messages.append(message)
  end

  def reduce_messages(exception)
@@ -47,7 +43,7 @@ module Langchain
  def context
    return if @context.nil? && @summary.nil?

-   [@context, @summary].compact.join("\n")
+   SystemMessage.new([@context, @summary].compact.join("\n"))
  end

  private
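
Since messages now carry their own role via `#type`, the memory no longer needs role-specific appenders. A sketch of the new surface (the constructor arguments shown are an assumption):

```ruby
memory = Langchain::ConversationMemory.new(llm: llm)
memory.set_context(Langchain::SystemMessage.new("You are terse"))
memory.append_message(Langchain::HumanMessage.new("Hello"))
memory.append_message(Langchain::AIMessage.new("Hi."))
memory.context # => SystemMessage combining the context and any running summary
```
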
data/lib/langchain/human_message.rb ADDED
@@ -0,0 +1,9 @@
+ # frozen_string_literal: true
+
+ module Langchain
+   class HumanMessage < Message
+     def type
+       "human"
+     end
+   end
+ end
data/lib/langchain/llm/anthropic.rb CHANGED
@@ -13,7 +13,8 @@ module Langchain::LLM
  class Anthropic < Base
    DEFAULTS = {
      temperature: 0.0,
-     completion_model_name: "claude-2"
+     completion_model_name: "claude-2",
+     max_tokens_to_sample: 256
    }.freeze

    # TODO: Implement token length validator for Anthropic
@@ -49,7 +50,7 @@ module Langchain::LLM
  private

  def compose_parameters(model, params)
-   default_params = {model: model, temperature: @defaults[:temperature]}
+   default_params = {model: model}.merge(@defaults.except(:completion_model_name))

    default_params.merge(params)
  end
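
Because `compose_parameters` now merges every default except `completion_model_name`, the new `max_tokens_to_sample: 256` reaches each request and remains overridable per call. A sketch:

```ruby
anthropic = Langchain::LLM::Anthropic.new(api_key: ENV["ANTHROPIC_API_KEY"])

anthropic.complete(prompt: "Summarize Ruby in one line")
# sends temperature: 0.0 and max_tokens_to_sample: 256 by default

anthropic.complete(prompt: "Write a long essay", max_tokens_to_sample: 1024)
# caller-supplied params win: default_params.merge(params) is applied last
```
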
data/lib/langchain/llm/cohere.rb CHANGED
@@ -70,7 +70,8 @@ module Langchain::LLM

  # Cohere does not have a dedicated chat endpoint, so instead we call `complete()`
  def chat(...)
-   complete(...)
+   response_text = complete(...)
+   Langchain::AIMessage.new(response_text)
  end

  # Generate a summary in English for a given text
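
Cohere's `chat` thus conforms to the message-object contract used by `Conversation`, even though it still delegates to `complete`. Illustrative sketch:

```ruby
cohere = Langchain::LLM::Cohere.new(api_key: ENV["COHERE_API_KEY"])
response = cohere.chat(prompt: "Hello")
response.class   # => Langchain::AIMessage
response.content # the completion text
```
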
data/lib/langchain/llm/google_palm.rb CHANGED
@@ -19,6 +19,9 @@ module Langchain::LLM
      embeddings_model_name: "embedding-gecko-001"
    }.freeze
    LENGTH_VALIDATOR = Langchain::Utils::TokenLength::GooglePalmValidator
+   ROLE_MAPPING = {
+     "human" => "user"
+   }

    def initialize(api_key:, default_options: {})
      depends_on "google_palm_api"
@@ -72,10 +75,12 @@ module Langchain::LLM
  #
  # Generate a chat completion for a given prompt
  #
- # @param prompt [String] The prompt to generate a chat completion for
- # @param messages [Array] The messages that have been sent in the conversation
- # @param params extra parameters passed to GooglePalmAPI::Client#generate_chat_message
- # @return [String] The chat completion
+ # @param prompt [HumanMessage] The prompt to generate a chat completion for
+ # @param messages [Array<AIMessage|HumanMessage>] The messages that have been sent in the conversation
+ # @param context [SystemMessage] An initial context to provide as a system message, ie "You are RubyGPT, a helpful chat bot for helping people learn Ruby"
+ # @param examples [Array<AIMessage|HumanMessage>] Examples of messages to provide to the model. Useful for Few-Shot Prompting
+ # @param options [Hash] extra parameters passed to GooglePalmAPI::Client#generate_chat_message
+ # @return [AIMessage] The chat completion
  #
  def chat(prompt: "", messages: [], context: "", examples: [], **options)
    raise ArgumentError.new(":prompt or :messages argument is expected") if prompt.empty? && messages.empty?
@@ -83,7 +88,7 @@ module Langchain::LLM
  default_params = {
    temperature: @defaults[:temperature],
    model: @defaults[:chat_completion_model_name],
-   context: context,
+   context: context.to_s,
    messages: compose_chat_messages(prompt: prompt, messages: messages),
    examples: compose_examples(examples)
  }
@@ -104,7 +109,7 @@ module Langchain::LLM
    response = client.generate_chat_message(**default_params)
    raise "GooglePalm API returned an error: #{response}" if response.dig("error")

-   response.dig("candidates", 0, "content")
+   Langchain::AIMessage.new(response.dig("candidates", 0, "content"))
  end

  #
@@ -146,8 +151,8 @@ module Langchain::LLM
  def compose_examples(examples)
    examples.each_slice(2).map do |example|
      {
-       input: {content: example.first[:content]},
-       output: {content: example.last[:content]}
+       input: {content: example.first.content},
+       output: {content: example.last.content}
      }
    end
  end
@@ -155,8 +160,8 @@ module Langchain::LLM
  def transform_messages(messages)
    messages.map do |message|
      {
-       author: message[:role] || message["role"],
-       content: message[:content] || message["content"]
+       author: ROLE_MAPPING.fetch(message.type, message.type),
+       content: message.content
      }
    end
  end
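
`transform_messages` now derives the PaLM author field from each message's `type`, falling back to the type itself when no mapping exists:

```ruby
mapping = Langchain::LLM::GooglePalm::ROLE_MAPPING

human = Langchain::HumanMessage.new("What is 2 + 2?")
ai    = Langchain::AIMessage.new("4")

mapping.fetch(human.type, human.type) # => "user" ("human" is remapped for the API)
mapping.fetch(ai.type, ai.type)       # => "ai" (no entry, so the type passes through)
```
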
data/lib/langchain/llm/llama_cpp.rb CHANGED
@@ -33,8 +33,8 @@ module Langchain::LLM
    @seed = seed
  end

- # @params text [String] The text to embed
- # @params n_threads [Integer] The number of CPU threads to use
+ # @param text [String] The text to embed
+ # @param n_threads [Integer] The number of CPU threads to use
  # @return [Array] The embedding
  def embed(text:, n_threads: nil)
    # contexts are kinda stateful when it comes to embeddings, so allocate one each time
@@ -49,9 +49,9 @@ module Langchain::LLM
    context.embeddings
  end

- # @params prompt [String] The prompt to complete
- # @params n_predict [Integer] The number of tokens to predict
- # @params n_threads [Integer] The number of CPU threads to use
+ # @param prompt [String] The prompt to complete
+ # @param n_predict [Integer] The number of tokens to predict
+ # @param n_threads [Integer] The number of CPU threads to use
  # @return [String] The completed prompt
  def complete(prompt:, n_predict: 128, n_threads: nil)
    n_threads ||= self.n_threads
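
The corrected YARD tags (`@params` → `@param`) document the public signatures. A usage sketch; the constructor options shown are an assumption:

```ruby
llama = Langchain::LLM::LlamaCpp.new(model_path: ENV["LLAMACPP_MODEL_PATH"])

llama.embed(text: "Ruby is a gem", n_threads: 4)    # => Array of floats
llama.complete(prompt: "def fib(n)", n_predict: 64) # => String continuation
```
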
data/lib/langchain/llm/openai.rb CHANGED
@@ -18,8 +18,12 @@ module Langchain::LLM
      dimension: 1536
    }.freeze
    LENGTH_VALIDATOR = Langchain::Utils::TokenLength::OpenAIValidator
+   ROLE_MAPPING = {
+     "ai" => "assistant",
+     "human" => "user"
+   }

-   attr_accessor :functions, :complete_response
+   attr_accessor :functions

    def initialize(api_key:, llm_options: {}, default_options: {})
      depends_on "ruby-openai"
@@ -98,19 +102,13 @@ module Langchain::LLM
  # },
  # ]
  #
- # @param prompt [String] The prompt to generate a chat completion for
- # @param messages [Array<Hash>] The messages that have been sent in the conversation
- #   Each message should be a Hash with the following keys:
- #   - :content [String] The content of the message
- #   - :role [String] The role of the sender (system, user, assistant, or function)
- # @param context [String] An initial context to provide as a system message, ie "You are RubyGPT, a helpful chat bot for helping people learn Ruby"
- # @param examples [Array<Hash>] Examples of messages to provide to the model. Useful for Few-Shot Prompting
- #   Each message should be a Hash with the following keys:
- #   - :content [String] The content of the message
- #   - :role [String] The role of the sender (system, user, assistant, or function)
- # @param options <Hash> extra parameters passed to OpenAI::Client#chat
- # @yield [String] Stream responses back one String at a time
- # @return [String] The chat completion
+ # @param prompt [HumanMessage] The prompt to generate a chat completion for
+ # @param messages [Array<AIMessage|HumanMessage>] The messages that have been sent in the conversation
+ # @param context [SystemMessage] An initial context to provide as a system message, ie "You are RubyGPT, a helpful chat bot for helping people learn Ruby"
+ # @param examples [Array<AIMessage|HumanMessage>] Examples of messages to provide to the model. Useful for Few-Shot Prompting
+ # @param options [Hash] extra parameters passed to OpenAI::Client#chat
+ # @yield [AIMessage] Stream responses back one String at a time
+ # @return [AIMessage] The chat completion
  #
  def chat(prompt: "", messages: [], context: "", examples: [], **options)
    raise ArgumentError.new(":prompt or :messages argument is expected") if prompt.empty? && messages.empty?
@@ -126,16 +124,20 @@ module Langchain::LLM

    if (streaming = block_given?)
      parameters[:stream] = proc do |chunk, _bytesize|
-       yield chunk if complete_response
-       yield chunk.dig("choices", 0, "delta", "content") if !complete_response
+       delta = chunk.dig("choices", 0, "delta")
+       content = delta["content"]
+       additional_kwargs = {function_call: delta["function_call"]}.compact
+       yield Langchain::AIMessage.new(content, additional_kwargs)
      end
    end

    response = client.chat(parameters: parameters)
    raise Langchain::LLM::ApiError.new "Chat completion failed: #{response.dig("error", "message")}" if !response.empty? && response.dig("error")
    unless streaming
-     return response.dig("choices", 0, "message", "content") if !complete_response
-     return response if complete_response
+     message = response.dig("choices", 0, "message")
+     content = message["content"]
+     additional_kwargs = {function_call: message["function_call"]}.compact
+     Langchain::AIMessage.new(content.to_s, additional_kwargs)
    end
  end
@@ -171,9 +173,9 @@ module Langchain::LLM

    history.concat transform_messages(messages) unless messages.empty?

-   unless context.nil? || context.empty?
+   unless context.nil? || context.to_s.empty?
      history.reject! { |message| message[:role] == "system" }
-     history.prepend({role: "system", content: context})
+     history.prepend({role: "system", content: context.content})
    end

    unless prompt.empty?
@@ -189,12 +191,9 @@ module Langchain::LLM

  def transform_messages(messages)
    messages.map do |message|
-     role = message[:role] || message["role"]
-     content = message[:content] || message["content"]
-
      {
-       content: content,
-       role: (role == "ai") ? "assistant" : role
+       role: ROLE_MAPPING.fetch(message.type, message.type),
+       content: message.content
      }
    end
  end
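
With `complete_response` gone, both the streaming and non-streaming paths produce `Langchain::AIMessage` objects, and function-call payloads ride along in `additional_kwargs`. A sketch:

```ruby
openai = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])

# Streaming: the block now receives AIMessage chunks, not raw strings
openai.chat(prompt: "Tell me a joke") do |ai_message|
  print ai_message.content
end

# Non-streaming
reply = openai.chat(prompt: "Tell me a joke")
reply.content           # full completion text
reply.additional_kwargs # => {function_call: {...}} only when the model calls a function
```
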
data/lib/langchain/llm/replicate.rb CHANGED
@@ -84,7 +84,8 @@ module Langchain::LLM

  # Cohere does not have a dedicated chat endpoint, so instead we call `complete()`
  def chat(...)
-   complete(...)
+   response_text = complete(...)
+   Langchain::AIMessage.new(response_text)
  end

  #
data/lib/langchain/loader.rb CHANGED
@@ -89,9 +89,9 @@ module Langchain
  end

  def load_from_path
-   raise FileNotFound unless File.exist?(@path)
+   return File.open(@path) if File.exist?(@path)

-   File.open(@path)
+   raise FileNotFound, "File #{@path} does not exist"
  end

  def load_from_directory(&block)
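
`load_from_path` now names the missing path in the error instead of raising a bare `FileNotFound`. Illustrative sketch (the path is hypothetical):

```ruby
Langchain::Loader.new("docs/missing.pdf").load
# => raises FileNotFound, "File docs/missing.pdf does not exist"
```
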
data/lib/langchain/message.rb ADDED
@@ -0,0 +1,35 @@
+ # frozen_string_literal: true
+
+ module Langchain
+   class Message
+     attr_reader :content, :additional_kwargs
+
+     def initialize(content, additional_kwargs = nil)
+       @content = content
+       @additional_kwargs = additional_kwargs
+     end
+
+     def type
+       raise NotImplementedError
+     end
+
+     def to_s
+       content
+     end
+
+     def ==(other)
+       to_json == other.to_json
+     end
+
+     def to_json(options = {})
+       hash = {
+         type: type,
+         content: content
+       }
+
+       hash[:additional_kwargs] = additional_kwargs unless additional_kwargs.nil? || additional_kwargs.empty?
+
+       hash.to_json
+     end
+   end
+ end
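
`Message` is the base class that `AIMessage`, `HumanMessage`, and `SystemMessage` (all new in this release, per the files list) subclass; equality is JSON-based and empty `additional_kwargs` are dropped from serialization. A quick sketch:

```ruby
msg = Langchain::AIMessage.new("Hello")
msg.to_s    # => "Hello"
msg.to_json # => {"type":"ai","content":"Hello"} (no additional_kwargs key when empty)

msg == Langchain::AIMessage.new("Hello")    # => true, compared via to_json
msg == Langchain::HumanMessage.new("Hello") # => false, the type differs
```
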
data/lib/langchain/output_parsers/base.rb CHANGED
@@ -9,7 +9,8 @@ module Langchain::OutputParsers
  # Parse the output of an LLM call.
  #
  # @param text - LLM output to parse.
- # @returns Parsed output.
+ #
+ # @return [Object] Parsed output.
  #
  def parse(text:)
    raise NotImplementedError
@@ -18,9 +19,9 @@ module Langchain::OutputParsers
  #
  # Return a string describing the format of the output.
  #
- # @returns Format instructions.
- # @param options - Options for formatting instructions.
- # @example
+ # @return [String] Format instructions.
+ #
+ # @example returns the format instructions
  #   ```json
  #   {
  #     "foo": "bar"
data/lib/langchain/output_parsers/{fix.rb → output_fixing_parser.rb} RENAMED
@@ -65,7 +65,9 @@ module Langchain::OutputParsers
  #
  # Creates a new instance of the class using the given JSON::Schema.
  #
- # @param schema [JSON::Schema] The JSON::Schema to use
+ # @param llm [Langchain::LLM] The LLM used in the fixing process
+ # @param parser [Langchain::OutputParsers] The parser originally used which resulted in parsing error
+ # @param prompt [Langchain::Prompt::PromptTemplate]
  #
  # @return [Object] A new instance of the class
  #
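
Per the corrected tags, the fixing parser wraps an LLM plus the parser that originally failed, retrying the parse after asking the LLM to repair malformed output. A hedged sketch; `from_llm` and `from_json_schema` follow this gem's documented constructors, but treat the exact signatures as assumptions:

```ruby
fix_parser = Langchain::OutputParsers::OutputFixingParser.from_llm(
  llm: Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"]),
  parser: Langchain::OutputParsers::StructuredOutputParser.from_json_schema(json_schema)
)

# On a parse error, the fixing parser prompts the LLM to fix the output,
# then parses the repaired text:
fix_parser.parse(malformed_llm_output)
```
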