langchainrb 0.6.11 → 0.6.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +8 -0
  3. data/README.md +5 -11
  4. data/lib/langchain/agent/base.rb +1 -0
  5. data/lib/langchain/agent/{react_agent/react_agent.rb → react_agent.rb} +12 -11
  6. data/lib/langchain/ai_message.rb +9 -0
  7. data/lib/langchain/conversation.rb +11 -11
  8. data/lib/langchain/conversation_memory.rb +3 -7
  9. data/lib/langchain/human_message.rb +9 -0
  10. data/lib/langchain/llm/cohere.rb +3 -2
  11. data/lib/langchain/llm/google_palm.rb +16 -11
  12. data/lib/langchain/llm/llama_cpp.rb +5 -5
  13. data/lib/langchain/llm/openai.rb +24 -25
  14. data/lib/langchain/llm/replicate.rb +2 -1
  15. data/lib/langchain/loader.rb +3 -2
  16. data/lib/langchain/message.rb +35 -0
  17. data/lib/langchain/output_parsers/base.rb +5 -4
  18. data/lib/langchain/output_parsers/{fix.rb → output_fixing_parser.rb} +3 -1
  19. data/lib/langchain/prompt/loading.rb +73 -67
  20. data/lib/langchain/prompt.rb +5 -0
  21. data/lib/langchain/system_message.rb +9 -0
  22. data/lib/langchain/tool/base.rb +14 -14
  23. data/lib/langchain/vectorsearch/chroma.rb +3 -2
  24. data/lib/langchain/vectorsearch/milvus.rb +4 -3
  25. data/lib/langchain/vectorsearch/pgvector.rb +10 -7
  26. data/lib/langchain/vectorsearch/pinecone.rb +18 -2
  27. data/lib/langchain/vectorsearch/qdrant.rb +4 -3
  28. data/lib/langchain/vectorsearch/weaviate.rb +3 -2
  29. data/lib/langchain/version.rb +1 -1
  30. data/lib/langchain.rb +19 -97
  31. metadata +49 -50
  32. data/.env.example +0 -21
  33. data/.rspec +0 -3
  34. data/.rubocop.yml +0 -11
  35. data/.tool-versions +0 -1
  36. data/Gemfile +0 -14
  37. data/Gemfile.lock +0 -360
  38. data/Rakefile +0 -17
  39. data/examples/conversation_with_openai.rb +0 -52
  40. data/examples/create_and_manage_few_shot_prompt_templates.rb +0 -36
  41. data/examples/create_and_manage_prompt_templates.rb +0 -25
  42. data/examples/create_and_manage_prompt_templates_using_structured_output_parser.rb +0 -116
  43. data/examples/llama_cpp.rb +0 -24
  44. data/examples/open_ai_function_calls.rb +0 -41
  45. data/examples/open_ai_qdrant_function_calls.rb +0 -39
  46. data/examples/pdf_store_and_query_with_chroma.rb +0 -40
  47. data/examples/store_and_query_with_pinecone.rb +0 -46
  48. data/examples/store_and_query_with_qdrant.rb +0 -37
  49. data/examples/store_and_query_with_weaviate.rb +0 -32
  50. data/lefthook.yml +0 -5
  51. data/sig/langchain.rbs +0 -4
  52. /data/lib/langchain/agent/{sql_query_agent/sql_query_agent.rb → sql_query_agent.rb} +0 -0
  53. /data/lib/langchain/output_parsers/{structured.rb → structured_output_parser.rb} +0 -0
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 78d1726db950b4234799cdf34185110411cbc666836225a63ed9d5be6b0a5575
- data.tar.gz: 0ed5595b0aae9dcaa97ee5cbc653cf3c9cb21f6057f5439ec78711ecb1cf6b46
+ metadata.gz: 9a8dc8c16a235328e6122725804fc8dface37910d2014ecf44410631d3ec63cb
+ data.tar.gz: 5d69e6b1dda419d2834f9041a18eae48b2c63303cc901515cd883153635f0742
  SHA512:
- metadata.gz: c0313618b12b10943e8825aaabd66306e21b0a24fefe9c4593cb4f28b1a03e099781944057ce9423c98851bad7dea07e2832c4ab37d416cf743b126a26a6a308
- data.tar.gz: a2910e93f883bee84288e5bd22644f17b30957a6ae9a696b3adc01dc9b32e2f546b49d46c3587856a883cd4dc1f0a677a3970d357b5fe2dfd9216ce894d06120
+ metadata.gz: 0c176e717986c0bb0b74761858bf0a6aac8e84fff179791fd67b68441760d5a419ae767bb678bab8a783bfaf6a17f53b7177b24d96fd8f1b076fd4f091c5443e
+ data.tar.gz: a0457aaf411f8932ada4bb3683fef81396334186047a5bb26040c0c03634443662806ad3d2d37ec81f4121f519e387f77688e395cc38882b7f8ab85c0795fba6
data/CHANGELOG.md CHANGED
@@ -1,5 +1,13 @@
  ## [Unreleased]

+ ## [0.6.13] - 2023-08-23
+ - Add `k:` parameter to all `ask()` vector search methods
+ - Bump Faraday to 2.x
+
+ ## [0.6.12] - 2023-08-13
+
+ ## [0.6.11] - 2023-08-08
+
  ## [0.6.10] - 2023-08-01
  - 🗣️ LLMs
  - Introducing Anthropic support
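The headline user-facing change in 0.6.13 is the `k:` keyword added to every vector search `ask()` method. A minimal sketch of how it might be used; the Weaviate constructor arguments follow the gem's README and are illustrative placeholders, not part of this diff:

```ruby
require "langchain"

# Illustrative setup; credentials and index name are placeholders.
client = Langchain::Vectorsearch::Weaviate.new(
  url: ENV["WEAVIATE_URL"],
  api_key: ENV["WEAVIATE_API_KEY"],
  index_name: "Recipes",
  llm: Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
)

# `k:` caps how many nearest-neighbor documents are retrieved and
# passed to the LLM as context when answering the question.
client.ask(question: "What is the best dessert recipe?", k: 4)
```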
data/README.md CHANGED
@@ -6,7 +6,7 @@

  :warning: UNDER ACTIVE AND RAPID DEVELOPMENT (MAY BE BUGGY AND UNTESTED)

- ![Tests status](https://github.com/andreibondarev/langchainrb/actions/workflows/ci.yml/badge.svg)
+ ![Tests status](https://github.com/andreibondarev/langchainrb/actions/workflows/ci.yml/badge.svg?branch=main)
  [![Gem Version](https://badge.fury.io/rb/langchainrb.svg)](https://badge.fury.io/rb/langchainrb)
  [![Docs](http://img.shields.io/badge/yard-docs-blue.svg)](http://rubydoc.info/gems/langchainrb)
  [![License](https://img.shields.io/badge/license-MIT-green.svg)](https://github.com/andreibondarev/langchainrb/blob/main/LICENSE.txt)
@@ -61,10 +61,10 @@ client = Langchain::Vectorsearch::Weaviate.new(
  # You can instantiate any other supported vector search database:
  client = Langchain::Vectorsearch::Chroma.new(...) # `gem "chroma-db", "~> 0.3.0"`
  client = Langchain::Vectorsearch::Hnswlib.new(...) # `gem "hnswlib", "~> 0.8.1"`
- client = Langchain::Vectorsearch::Milvus.new(...) # `gem "milvus", "~> 0.9.0"`
+ client = Langchain::Vectorsearch::Milvus.new(...) # `gem "milvus", "~> 0.9.2"`
  client = Langchain::Vectorsearch::Pinecone.new(...) # `gem "pinecone", "~> 0.1.6"`
  client = Langchain::Vectorsearch::Pgvector.new(...) # `gem "pgvector", "~> 0.2"`
- client = Langchain::Vectorsearch::Qdrant.new(...) # `gem"qdrant-ruby", "~> 0.9.0"`
+ client = Langchain::Vectorsearch::Qdrant.new(...) # `gem"qdrant-ruby", "~> 0.9.3"`
  ```

  ```ruby
@@ -161,13 +161,10 @@ qdrant:

  ```ruby
  client.llm.functions = functions
- client.llm.complete_response = true
  ```

- `complete_response` will return the entire choices data from the gpt response
-
  #### Cohere
- Add `gem "cohere-ruby", "~> 0.9.3"` to your Gemfile.
+ Add `gem "cohere-ruby", "~> 0.9.6"` to your Gemfile.

  ```ruby
  cohere = Langchain::LLM::Cohere.new(api_key: ENV["COHERE_API_KEY"])
@@ -192,7 +189,7 @@ replicate = Langchain::LLM::Replicate.new(api_key: ENV["REPLICATE_API_KEY"])
  ```

  #### Google PaLM (Pathways Language Model)
- Add `"google_palm_api", "~> 0.1.2"` to your Gemfile.
+ Add `"google_palm_api", "~> 0.1.3"` to your Gemfile.
  ```ruby
  google_palm = Langchain::LLM::GooglePalm.new(api_key: ENV["GOOGLE_PALM_API_KEY"])
  ```
@@ -422,9 +419,6 @@ agent = Langchain::Agent::ReActAgent.new(
  llm: openai,
  tools: [search_tool, calculator]
  )
-
- agent.tools
- # => ["google_search", "calculator"]
  ```
  ```ruby
  agent.run(question: "How many full soccer fields would be needed to cover the distance between NYC and DC in a straight line?")
data/lib/langchain/agent/base.rb CHANGED
@@ -7,6 +7,7 @@ module Langchain::Agent
  #
  # Available:
  # - {Langchain::Agent::ReActAgent}
+ # - {Langchain::Agent::SQLQueryAgent}
  #
  # @abstract
  class Base
data/lib/langchain/agent/{react_agent/react_agent.rb → react_agent.rb} RENAMED
@@ -7,12 +7,13 @@ module Langchain::Agent
  #
  # agent = Langchain::Agent::ReActAgent.new(
  #   llm: llm,
- #   tools: ["google_search", "calculator", "wikipedia"]
+ #   tools: [
+ #     Langchain::Tool::GoogleSearch.new(api_key: "YOUR_API_KEY"),
+ #     Langchain::Tool::Calculator.new,
+ #     Langchain::Tool::Wikipedia.new
+ #   ]
  # )
  #
- # agent.tools
- # # => ["google_search", "calculator", "wikipedia"]
- #
  # agent.run(question: "How many full soccer fields would be needed to cover the distance between NYC and DC in a straight line?")
  # #=> "Approximately 2,945 soccer fields would be needed to cover the distance between NYC and DC in a straight line."
  class ReActAgent < Base
@@ -21,7 +22,7 @@ module Langchain::Agent
  # Initializes the Agent
  #
  # @param llm [Object] The LLM client to use
- # @param tools [Array] The tools to use
+ # @param tools [Array<Tool>] The tools to use
  # @param max_iterations [Integer] The maximum number of iterations to run
  # @return [ReActAgent] The Agent::ReActAgent instance
  def initialize(llm:, tools: [], max_iterations: 10)
@@ -35,8 +36,8 @@

  # Validate tools when they're re-assigned
  #
- # @param value [Array] The tools to use
- # @return [Array] The tools that will be used
+ # @param value [Array<Tool>] The tools to use
+ # @return [Array<Tool>] The tools that will be used
  def tools=(value)
  Langchain::Tool::Base.validate_tools!(tools: value)
  @tools = value
@@ -70,7 +71,7 @@
  action_input = response.match(/Action Input: "?(.*)"?/)&.send(:[], -1)

  # Find the Tool and call `execute`` with action_input as the input
- tool = tools.find { |tool| tool.tool_name == action.strip }
+ tool = tools.find { |tool| tool.name == action.strip }
  Langchain.logger.info("Invoking \"#{tool.class}\" Tool with \"#{action_input}\"", for: self.class)

  # Call `execute` with action_input as the input
@@ -99,15 +100,15 @@
  # @param tools [Array] Tools to use
  # @return [String] Prompt
  def create_prompt(question:, tools:)
- tool_list = tools.map(&:tool_name)
+ tool_list = tools.map(&:name)

  prompt_template.format(
  date: Date.today.strftime("%B %d, %Y"),
  question: question,
  tool_names: "[#{tool_list.join(", ")}]",
  tools: tools.map do |tool|
- tool_name = tool.tool_name
- tool_description = tool.tool_description
+ tool_name = tool.name
+ tool_description = tool.description
  "#{tool_name}: #{tool_description}"
  end.join("\n")
  )
data/lib/langchain/ai_message.rb ADDED
@@ -0,0 +1,9 @@
+ # frozen_string_literal: true
+
+ module Langchain
+   class AIMessage < Message
+     def type
+       "ai"
+     end
+   end
+ end
data/lib/langchain/conversation.rb CHANGED
@@ -39,45 +39,45 @@ module Langchain

  def set_functions(functions)
  @llm.functions = functions
- @llm.complete_response = true
  end

  # Set the context of the conversation. Usually used to set the model's persona.
  # @param message [String] The context of the conversation
  def set_context(message)
- @memory.set_context message
+ @memory.set_context SystemMessage.new(message)
  end

  # Add examples to the conversation. Used to give the model a sense of the conversation.
- # @param examples [Array<Hash>] The examples to add to the conversation
+ # @param examples [Array<AIMessage|HumanMessage>] The examples to add to the conversation
  def add_examples(examples)
  @memory.add_examples examples
  end

  # Message the model with a prompt and return the response.
  # @param message [String] The prompt to message the model with
- # @return [String] The response from the model
+ # @return [AIMessage] The response from the model
  def message(message)
- @memory.append_user_message(message)
- response = llm_response(message)
- @memory.append_ai_message(response)
- response
+ human_message = HumanMessage.new(message)
+ @memory.append_message(human_message)
+ ai_message = llm_response(human_message)
+ @memory.append_message(ai_message)
+ ai_message
  end

  # Messages from conversation memory
- # @return [Array<Hash>] The messages from the conversation memory
+ # @return [Array<AIMessage|HumanMessage>] The messages from the conversation memory
  def messages
  @memory.messages
  end

  # Context from conversation memory
- # @return [String] Context from conversation memory
+ # @return [SystemMessage] Context from conversation memory
  def context
  @memory.context
  end

  # Examples from conversation memory
- # @return [Array<Hash>] Examples from the conversation memory
+ # @return [Array<AIMessage|HumanMessage>] Examples from the conversation memory
  def examples
  @memory.examples
  end
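Taken together with the new message classes, the `Conversation` hunk above changes the public contract: `message` now returns a `Langchain::AIMessage` instead of a raw `String`. A minimal sketch under that assumption; the persona and prompt text are illustrative:

```ruby
require "langchain"

chat = Langchain::Conversation.new(
  llm: Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
)
chat.set_context("You are a concise Ruby tutor.") # wrapped into a SystemMessage internally

reply = chat.message("What does attr_reader do?")
reply.class   # => Langchain::AIMessage
reply.content # => the reply text; call `to_s` where a String was expected before
```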
data/lib/langchain/conversation_memory.rb CHANGED
@@ -25,12 +25,8 @@ module Langchain
  @examples.concat examples
  end

- def append_ai_message(message)
- @messages << {role: "ai", content: message}
- end
-
- def append_user_message(message)
- @messages << {role: "user", content: message}
+ def append_message(message)
+ @messages.append(message)
  end

  def reduce_messages(exception)
@@ -47,7 +43,7 @@ module Langchain
  def context
  return if @context.nil? && @summary.nil?

- [@context, @summary].compact.join("\n")
+ SystemMessage.new([@context, @summary].compact.join("\n"))
  end

  private
data/lib/langchain/human_message.rb ADDED
@@ -0,0 +1,9 @@
+ # frozen_string_literal: true
+
+ module Langchain
+   class HumanMessage < Message
+     def type
+       "human"
+     end
+   end
+ end
data/lib/langchain/llm/cohere.rb CHANGED
@@ -5,7 +5,7 @@ module Langchain::LLM
  # Wrapper around the Cohere API.
  #
  # Gem requirements:
- # gem "cohere-ruby", "~> 0.9.5"
+ # gem "cohere-ruby", "~> 0.9.6"
  #
  # Usage:
  # cohere = Langchain::LLM::Cohere.new(api_key: "YOUR_API_KEY")
@@ -70,7 +70,8 @@ module Langchain::LLM

  # Cohere does not have a dedicated chat endpoint, so instead we call `complete()`
  def chat(...)
- complete(...)
+ response_text = complete(...)
+ Langchain::AIMessage.new(response_text)
  end

  # Generate a summary in English for a given text
data/lib/langchain/llm/google_palm.rb CHANGED
@@ -5,7 +5,7 @@ module Langchain::LLM
  # Wrapper around the Google PaLM (Pathways Language Model) APIs: https://ai.google/build/machine-learning/
  #
  # Gem requirements:
- # gem "google_palm_api", "~> 0.1.2"
+ # gem "google_palm_api", "~> 0.1.3"
  #
  # Usage:
  # google_palm = Langchain::LLM::GooglePalm.new(api_key: "YOUR_API_KEY")
@@ -19,6 +19,9 @@ module Langchain::LLM
  embeddings_model_name: "embedding-gecko-001"
  }.freeze
  LENGTH_VALIDATOR = Langchain::Utils::TokenLength::GooglePalmValidator
+ ROLE_MAPPING = {
+ "human" => "user"
+ }

  def initialize(api_key:, default_options: {})
  depends_on "google_palm_api"
@@ -72,10 +75,12 @@
  #
  # Generate a chat completion for a given prompt
  #
- # @param prompt [String] The prompt to generate a chat completion for
- # @param messages [Array] The messages that have been sent in the conversation
- # @param params extra parameters passed to GooglePalmAPI::Client#generate_chat_message
- # @return [String] The chat completion
+ # @param prompt [HumanMessage] The prompt to generate a chat completion for
+ # @param messages [Array<AIMessage|HumanMessage>] The messages that have been sent in the conversation
+ # @param context [SystemMessage] An initial context to provide as a system message, ie "You are RubyGPT, a helpful chat bot for helping people learn Ruby"
+ # @param examples [Array<AIMessage|HumanMessage>] Examples of messages to provide to the model. Useful for Few-Shot Prompting
+ # @param options [Hash] extra parameters passed to GooglePalmAPI::Client#generate_chat_message
+ # @return [AIMessage] The chat completion
  #
  def chat(prompt: "", messages: [], context: "", examples: [], **options)
  raise ArgumentError.new(":prompt or :messages argument is expected") if prompt.empty? && messages.empty?
@@ -83,7 +88,7 @@
  default_params = {
  temperature: @defaults[:temperature],
  model: @defaults[:chat_completion_model_name],
- context: context,
+ context: context.to_s,
  messages: compose_chat_messages(prompt: prompt, messages: messages),
  examples: compose_examples(examples)
  }
@@ -104,7 +109,7 @@
  response = client.generate_chat_message(**default_params)
  raise "GooglePalm API returned an error: #{response}" if response.dig("error")

- response.dig("candidates", 0, "content")
+ Langchain::AIMessage.new(response.dig("candidates", 0, "content"))
  end

  #
@@ -146,8 +151,8 @@
  def compose_examples(examples)
  examples.each_slice(2).map do |example|
  {
- input: {content: example.first[:content]},
- output: {content: example.last[:content]}
+ input: {content: example.first.content},
+ output: {content: example.last.content}
  }
  end
  end
@@ -155,8 +160,8 @@
  def transform_messages(messages)
  messages.map do |message|
  {
- author: message[:role] || message["role"],
- content: message[:content] || message["content"]
+ author: ROLE_MAPPING.fetch(message.type, message.type),
+ content: message.content
  }
  end
  end
data/lib/langchain/llm/llama_cpp.rb CHANGED
@@ -33,8 +33,8 @@ module Langchain::LLM
  @seed = seed
  end

- # @params text [String] The text to embed
- # @params n_threads [Integer] The number of CPU threads to use
+ # @param text [String] The text to embed
+ # @param n_threads [Integer] The number of CPU threads to use
  # @return [Array] The embedding
  def embed(text:, n_threads: nil)
  # contexts are kinda stateful when it comes to embeddings, so allocate one each time
@@ -49,9 +49,9 @@ module Langchain::LLM
  context.embeddings
  end

- # @params prompt [String] The prompt to complete
- # @params n_predict [Integer] The number of tokens to predict
- # @params n_threads [Integer] The number of CPU threads to use
+ # @param prompt [String] The prompt to complete
+ # @param n_predict [Integer] The number of tokens to predict
+ # @param n_threads [Integer] The number of CPU threads to use
  # @return [String] The completed prompt
  def complete(prompt:, n_predict: 128, n_threads: nil)
  n_threads ||= self.n_threads
data/lib/langchain/llm/openai.rb CHANGED
@@ -18,8 +18,12 @@ module Langchain::LLM
  dimension: 1536
  }.freeze
  LENGTH_VALIDATOR = Langchain::Utils::TokenLength::OpenAIValidator
+ ROLE_MAPPING = {
+ "ai" => "assistant",
+ "human" => "user"
+ }

- attr_accessor :functions, :complete_response
+ attr_accessor :functions

  def initialize(api_key:, llm_options: {}, default_options: {})
  depends_on "ruby-openai"
@@ -98,19 +102,13 @@
  # },
  # ]
  #
- # @param prompt [String] The prompt to generate a chat completion for
- # @param messages [Array<Hash>] The messages that have been sent in the conversation
- # Each message should be a Hash with the following keys:
- # - :content [String] The content of the message
- # - :role [String] The role of the sender (system, user, assistant, or function)
- # @param context [String] An initial context to provide as a system message, ie "You are RubyGPT, a helpful chat bot for helping people learn Ruby"
- # @param examples [Array<Hash>] Examples of messages to provide to the model. Useful for Few-Shot Prompting
- # Each message should be a Hash with the following keys:
- # - :content [String] The content of the message
- # - :role [String] The role of the sender (system, user, assistant, or function)
- # @param options <Hash> extra parameters passed to OpenAI::Client#chat
- # @yield [String] Stream responses back one String at a time
- # @return [String] The chat completion
+ # @param prompt [HumanMessage] The prompt to generate a chat completion for
+ # @param messages [Array<AIMessage|HumanMessage>] The messages that have been sent in the conversation
+ # @param context [SystemMessage] An initial context to provide as a system message, ie "You are RubyGPT, a helpful chat bot for helping people learn Ruby"
+ # @param examples [Array<AIMessage|HumanMessage>] Examples of messages to provide to the model. Useful for Few-Shot Prompting
+ # @param options [Hash] extra parameters passed to OpenAI::Client#chat
+ # @yield [AIMessage] Stream responses back one String at a time
+ # @return [AIMessage] The chat completion
  #
  def chat(prompt: "", messages: [], context: "", examples: [], **options)
  raise ArgumentError.new(":prompt or :messages argument is expected") if prompt.empty? && messages.empty?
@@ -126,16 +124,20 @@

  if (streaming = block_given?)
  parameters[:stream] = proc do |chunk, _bytesize|
- yield chunk if complete_response
- yield chunk.dig("choices", 0, "delta", "content") if !complete_response
+ delta = chunk.dig("choices", 0, "delta")
+ content = delta["content"]
+ additional_kwargs = {function_call: delta["function_call"]}.compact
+ yield Langchain::AIMessage.new(content, additional_kwargs)
  end
  end

  response = client.chat(parameters: parameters)
  raise Langchain::LLM::ApiError.new "Chat completion failed: #{response.dig("error", "message")}" if !response.empty? && response.dig("error")
  unless streaming
- return response.dig("choices", 0, "message", "content") if !complete_response
- return response if complete_response
+ message = response.dig("choices", 0, "message")
+ content = message["content"]
+ additional_kwargs = {function_call: message["function_call"]}.compact
+ Langchain::AIMessage.new(content.to_s, additional_kwargs)
  end
  end

@@ -171,9 +173,9 @@

  history.concat transform_messages(messages) unless messages.empty?

- unless context.nil? || context.empty?
+ unless context.nil? || context.to_s.empty?
  history.reject! { |message| message[:role] == "system" }
- history.prepend({role: "system", content: context})
+ history.prepend({role: "system", content: context.content})
  end

  unless prompt.empty?
@@ -189,12 +191,9 @@

  def transform_messages(messages)
  messages.map do |message|
- role = message[:role] || message["role"]
- content = message[:content] || message["content"]
-
  {
- content: content,
- role: (role == "ai") ? "assistant" : role
+ role: ROLE_MAPPING.fetch(message.type, message.type),
+ content: message.content
  }
  end
  end
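With `complete_response` removed, the OpenAI `chat` method above now always wraps results in `Langchain::AIMessage`, including streamed chunks. A hedged sketch of the streaming path; the prompt text is illustrative:

```ruby
require "langchain"

llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])

# The block now receives AIMessage chunks rather than raw content Strings;
# any streamed function_call delta is surfaced via `additional_kwargs`.
llm.chat(prompt: "Write a haiku about Ruby") do |chunk|
  print chunk.content
end
```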
data/lib/langchain/llm/replicate.rb CHANGED
@@ -84,7 +84,8 @@ module Langchain::LLM

  # Cohere does not have a dedicated chat endpoint, so instead we call `complete()`
  def chat(...)
- complete(...)
+ response_text = complete(...)
+ Langchain::AIMessage.new(response_text)
  end

  #
data/lib/langchain/loader.rb CHANGED
@@ -98,8 +98,8 @@ module Langchain
  Dir.glob(File.join(@path, "**/*")).map do |file|
  # Only load and add to result files with supported extensions
  Langchain::Loader.new(file, @options).load(&block)
- rescue
- UnknownFormatError nil
+ rescue => e
+ UnknownFormatError.new(e)
  end.flatten.compact
  end

@@ -134,6 +134,7 @@
  end

  def source_type
+ binding.pry
  url? ? @raw_data.content_type : File.extname(@path)
  end

data/lib/langchain/message.rb ADDED
@@ -0,0 +1,35 @@
+ # frozen_string_literal: true
+
+ module Langchain
+   class Message
+     attr_reader :content, :additional_kwargs
+
+     def initialize(content, additional_kwargs = nil)
+       @content = content
+       @additional_kwargs = additional_kwargs
+     end
+
+     def type
+       raise NotImplementedError
+     end
+
+     def to_s
+       content
+     end
+
+     def ==(other)
+       to_json == other.to_json
+     end
+
+     def to_json(options = {})
+       hash = {
+         type: type,
+         content: content
+       }
+
+       hash[:additional_kwargs] = additional_kwargs unless additional_kwargs.nil? || additional_kwargs.empty?
+
+       hash.to_json
+     end
+   end
+ end
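The new `Message` base class above gives `AIMessage`, `HumanMessage`, and the release's `SystemMessage` a shared value-object interface. A small usage sketch based directly on the added code:

```ruby
require "langchain"

human = Langchain::HumanMessage.new("Hello!")
ai    = Langchain::AIMessage.new("Hi there.")

human.type # => "human"
ai.to_s    # => "Hi there."
ai.to_json # => "{\"type\":\"ai\",\"content\":\"Hi there.\"}"

# Equality is defined as JSON equivalence (Message#==), so two messages
# with the same type and content compare equal:
ai == Langchain::AIMessage.new("Hi there.") # => true
```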
data/lib/langchain/output_parsers/base.rb CHANGED
@@ -9,7 +9,8 @@ module Langchain::OutputParsers
  # Parse the output of an LLM call.
  #
  # @param text - LLM output to parse.
- # @returns Parsed output.
+ #
+ # @return [Object] Parsed output.
  #
  def parse(text:)
  raise NotImplementedError
@@ -18,9 +19,9 @@
  #
  # Return a string describing the format of the output.
  #
- # @returns Format instructions.
- # @param options - Options for formatting instructions.
- # @example
+ # @return [String] Format instructions.
+ #
+ # @example returns the format instructions
  # ```json
  # {
  # "foo": "bar"
data/lib/langchain/output_parsers/{fix.rb → output_fixing_parser.rb} RENAMED
@@ -65,7 +65,9 @@ module Langchain::OutputParsers
  #
  # Creates a new instance of the class using the given JSON::Schema.
  #
- # @param schema [JSON::Schema] The JSON::Schema to use
+ # @param llm [Langchain::LLM] The LLM used in the fixing process
+ # @param parser [Langchain::OutputParsers] The parser originally used which resulted in parsing error
+ # @param prompt [Langchain::Prompt::PromptTemplate]
  #
  # @return [Object] A new instance of the class
  #