langchainrb 0.6.11 → 0.6.13
This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/CHANGELOG.md +8 -0
- data/README.md +5 -11
- data/lib/langchain/agent/base.rb +1 -0
- data/lib/langchain/agent/{react_agent/react_agent.rb → react_agent.rb} +12 -11
- data/lib/langchain/ai_message.rb +9 -0
- data/lib/langchain/conversation.rb +11 -11
- data/lib/langchain/conversation_memory.rb +3 -7
- data/lib/langchain/human_message.rb +9 -0
- data/lib/langchain/llm/cohere.rb +3 -2
- data/lib/langchain/llm/google_palm.rb +16 -11
- data/lib/langchain/llm/llama_cpp.rb +5 -5
- data/lib/langchain/llm/openai.rb +24 -25
- data/lib/langchain/llm/replicate.rb +2 -1
- data/lib/langchain/loader.rb +3 -2
- data/lib/langchain/message.rb +35 -0
- data/lib/langchain/output_parsers/base.rb +5 -4
- data/lib/langchain/output_parsers/{fix.rb → output_fixing_parser.rb} +3 -1
- data/lib/langchain/prompt/loading.rb +73 -67
- data/lib/langchain/prompt.rb +5 -0
- data/lib/langchain/system_message.rb +9 -0
- data/lib/langchain/tool/base.rb +14 -14
- data/lib/langchain/vectorsearch/chroma.rb +3 -2
- data/lib/langchain/vectorsearch/milvus.rb +4 -3
- data/lib/langchain/vectorsearch/pgvector.rb +10 -7
- data/lib/langchain/vectorsearch/pinecone.rb +18 -2
- data/lib/langchain/vectorsearch/qdrant.rb +4 -3
- data/lib/langchain/vectorsearch/weaviate.rb +3 -2
- data/lib/langchain/version.rb +1 -1
- data/lib/langchain.rb +19 -97
- metadata +49 -50
- data/.env.example +0 -21
- data/.rspec +0 -3
- data/.rubocop.yml +0 -11
- data/.tool-versions +0 -1
- data/Gemfile +0 -14
- data/Gemfile.lock +0 -360
- data/Rakefile +0 -17
- data/examples/conversation_with_openai.rb +0 -52
- data/examples/create_and_manage_few_shot_prompt_templates.rb +0 -36
- data/examples/create_and_manage_prompt_templates.rb +0 -25
- data/examples/create_and_manage_prompt_templates_using_structured_output_parser.rb +0 -116
- data/examples/llama_cpp.rb +0 -24
- data/examples/open_ai_function_calls.rb +0 -41
- data/examples/open_ai_qdrant_function_calls.rb +0 -39
- data/examples/pdf_store_and_query_with_chroma.rb +0 -40
- data/examples/store_and_query_with_pinecone.rb +0 -46
- data/examples/store_and_query_with_qdrant.rb +0 -37
- data/examples/store_and_query_with_weaviate.rb +0 -32
- data/lefthook.yml +0 -5
- data/sig/langchain.rbs +0 -4
- /data/lib/langchain/agent/{sql_query_agent/sql_query_agent.rb → sql_query_agent.rb} +0 -0
- /data/lib/langchain/output_parsers/{structured.rb → structured_output_parser.rb} +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 9a8dc8c16a235328e6122725804fc8dface37910d2014ecf44410631d3ec63cb
+  data.tar.gz: 5d69e6b1dda419d2834f9041a18eae48b2c63303cc901515cd883153635f0742
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 0c176e717986c0bb0b74761858bf0a6aac8e84fff179791fd67b68441760d5a419ae767bb678bab8a783bfaf6a17f53b7177b24d96fd8f1b076fd4f091c5443e
+  data.tar.gz: a0457aaf411f8932ada4bb3683fef81396334186047a5bb26040c0c03634443662806ad3d2d37ec81f4121f519e387f77688e395cc38882b7f8ab85c0795fba6
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,13 @@
 ## [Unreleased]

+## [0.6.13] - 2023-08-23
+- Add `k:` parameter to all `ask()` vector search methods
+- Bump Faraday to 2.x
+
+## [0.6.12] - 2023-08-13
+
+## [0.6.11] - 2023-08-08
+
 ## [0.6.10] - 2023-08-01
 - 🗣️ LLMs
   - Introducing Anthropic support
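The headline change in 0.6.13 is the new `k:` parameter on `ask()`. A minimal sketch of how it might be used, assuming a Weaviate client configured as in the README (the index name and environment variables are illustrative):

```ruby
llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])

client = Langchain::Vectorsearch::Weaviate.new(
  url: ENV["WEAVIATE_URL"],
  api_key: ENV["WEAVIATE_API_KEY"],
  index_name: "Recipes",
  llm: llm
)

# k controls how many nearest-neighbor documents are retrieved
# and passed to the LLM as context for answering the question.
client.ask(question: "What is the best recipe for chicken?", k: 10)
```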
data/README.md
CHANGED
@@ -6,7 +6,7 @@

 :warning: UNDER ACTIVE AND RAPID DEVELOPMENT (MAY BE BUGGY AND UNTESTED)

-![Tests status](https://github.com/andreibondarev/langchainrb/actions/workflows/ci.yml/badge.svg)
+[![Tests status](https://github.com/andreibondarev/langchainrb/actions/workflows/ci.yml/badge.svg)](https://github.com/andreibondarev/langchainrb/actions/workflows/ci.yml)
 [![Gem Version](https://badge.fury.io/rb/langchainrb.svg)](https://badge.fury.io/rb/langchainrb)
 [![Docs](http://img.shields.io/badge/yard-docs-blue.svg)](http://rubydoc.info/gems/langchainrb)
 [![License](https://img.shields.io/badge/license-MIT-green.svg)](https://github.com/andreibondarev/langchainrb/blob/main/LICENSE.txt)

@@ -61,10 +61,10 @@ client = Langchain::Vectorsearch::Weaviate.new(
 # You can instantiate any other supported vector search database:
 client = Langchain::Vectorsearch::Chroma.new(...) # `gem "chroma-db", "~> 0.3.0"`
 client = Langchain::Vectorsearch::Hnswlib.new(...) # `gem "hnswlib", "~> 0.8.1"`
-client = Langchain::Vectorsearch::Milvus.new(...) # `gem "milvus", "~> 0.9.
+client = Langchain::Vectorsearch::Milvus.new(...) # `gem "milvus", "~> 0.9.2"`
 client = Langchain::Vectorsearch::Pinecone.new(...) # `gem "pinecone", "~> 0.1.6"`
 client = Langchain::Vectorsearch::Pgvector.new(...) # `gem "pgvector", "~> 0.2"`
-client = Langchain::Vectorsearch::Qdrant.new(...) # `gem "qdrant-ruby", "~> 0.9.
+client = Langchain::Vectorsearch::Qdrant.new(...) # `gem "qdrant-ruby", "~> 0.9.3"`
 ```

 ```ruby

@@ -161,13 +161,10 @@ qdrant:

 ```ruby
 client.llm.functions = functions
-client.llm.complete_response = true
 ```

-`complete_response` will return the entire choices data from the gpt response
-
 #### Cohere
-Add `gem "cohere-ruby", "~> 0.9.
+Add `gem "cohere-ruby", "~> 0.9.6"` to your Gemfile.

 ```ruby
 cohere = Langchain::LLM::Cohere.new(api_key: ENV["COHERE_API_KEY"])

@@ -192,7 +189,7 @@ replicate = Langchain::LLM::Replicate.new(api_key: ENV["REPLICATE_API_KEY"])
 ```

 #### Google PaLM (Pathways Language Model)
-Add `"google_palm_api", "~> 0.1.
+Add `"google_palm_api", "~> 0.1.3"` to your Gemfile.
 ```ruby
 google_palm = Langchain::LLM::GooglePalm.new(api_key: ENV["GOOGLE_PALM_API_KEY"])
 ```

@@ -422,9 +419,6 @@ agent = Langchain::Agent::ReActAgent.new(
   llm: openai,
   tools: [search_tool, calculator]
 )
-
-agent.tools
-# => ["google_search", "calculator"]
 ```
 ```ruby
 agent.run(question: "How many full soccer fields would be needed to cover the distance between NYC and DC in a straight line?")
data/lib/langchain/agent/{react_agent/react_agent.rb → react_agent.rb}
CHANGED
@@ -7,12 +7,13 @@ module Langchain::Agent
 #
 # agent = Langchain::Agent::ReActAgent.new(
 #   llm: llm,
-#   tools: [
+#   tools: [
+#     Langchain::Tool::GoogleSearch.new(api_key: "YOUR_API_KEY"),
+#     Langchain::Tool::Calculator.new,
+#     Langchain::Tool::Wikipedia.new
+#   ]
 # )
 #
-# agent.tools
-# # => ["google_search", "calculator", "wikipedia"]
-#
 # agent.run(question: "How many full soccer fields would be needed to cover the distance between NYC and DC in a straight line?")
 # #=> "Approximately 2,945 soccer fields would be needed to cover the distance between NYC and DC in a straight line."
 class ReActAgent < Base

@@ -21,7 +22,7 @@
 # Initializes the Agent
 #
 # @param llm [Object] The LLM client to use
-# @param tools [Array] The tools to use
+# @param tools [Array<Tool>] The tools to use
 # @param max_iterations [Integer] The maximum number of iterations to run
 # @return [ReActAgent] The Agent::ReActAgent instance
 def initialize(llm:, tools: [], max_iterations: 10)

@@ -35,8 +36,8 @@
 # Validate tools when they're re-assigned
 #
-# @param value [Array] The tools to use
-# @return [Array] The tools that will be used
+# @param value [Array<Tool>] The tools to use
+# @return [Array<Tool>] The tools that will be used
 def tools=(value)
   Langchain::Tool::Base.validate_tools!(tools: value)
   @tools = value

@@ -70,7 +71,7 @@
 action_input = response.match(/Action Input: "?(.*)"?/)&.send(:[], -1)

 # Find the Tool and call `execute` with action_input as the input
-tool = tools.find { |tool| tool.
+tool = tools.find { |tool| tool.name == action.strip }
 Langchain.logger.info("Invoking \"#{tool.class}\" Tool with \"#{action_input}\"", for: self.class)

 # Call `execute` with action_input as the input

@@ -99,15 +100,15 @@
 # @param tools [Array] Tools to use
 # @return [String] Prompt
 def create_prompt(question:, tools:)
-  tool_list = tools.map(&:
+  tool_list = tools.map(&:name)

   prompt_template.format(
     date: Date.today.strftime("%B %d, %Y"),
     question: question,
     tool_names: "[#{tool_list.join(", ")}]",
     tools: tools.map do |tool|
-      tool_name = tool.
-      tool_description = tool.
+      tool_name = tool.name
+      tool_description = tool.description
       "#{tool_name}: #{tool_description}"
     end.join("\n")
   )
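The renamed accessors above (`tool.name`, `tool.description`, `tools.map(&:name)`) suggest that tools now expose instance-level `name` and `description` methods. A hypothetical custom tool written against that interface (this class is not part of the gem):

```ruby
class RubyVersionTool < Langchain::Tool::Base
  # The name is listed in the ReAct prompt and matched against the
  # LLM's "Action:" line by the tools.find call shown above.
  def name
    "ruby_version"
  end

  def description
    "Returns the running Ruby version. Input is ignored."
  end

  # The agent calls execute with the "Action Input:" text.
  def execute(input:)
    RUBY_VERSION
  end
end
```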
data/lib/langchain/conversation.rb
CHANGED

@@ -39,45 +39,45 @@ module Langchain
 def set_functions(functions)
   @llm.functions = functions
-  @llm.complete_response = true
 end

 # Set the context of the conversation. Usually used to set the model's persona.
 # @param message [String] The context of the conversation
 def set_context(message)
-  @memory.set_context message
+  @memory.set_context SystemMessage.new(message)
 end

 # Add examples to the conversation. Used to give the model a sense of the conversation.
-# @param examples [Array<
+# @param examples [Array<AIMessage|HumanMessage>] The examples to add to the conversation
 def add_examples(examples)
   @memory.add_examples examples
 end

 # Message the model with a prompt and return the response.
 # @param message [String] The prompt to message the model with
-# @return [
+# @return [AIMessage] The response from the model
 def message(message)
-
-
-
-
+  human_message = HumanMessage.new(message)
+  @memory.append_message(human_message)
+  ai_message = llm_response(human_message)
+  @memory.append_message(ai_message)
+  ai_message
 end

 # Messages from conversation memory
-# @return [Array<
+# @return [Array<AIMessage|HumanMessage>] The messages from the conversation memory
 def messages
   @memory.messages
 end

 # Context from conversation memory
-# @return [
+# @return [SystemMessage] Context from conversation memory
 def context
   @memory.context
 end

 # Examples from conversation memory
-# @return [Array<
+# @return [Array<AIMessage|HumanMessage>] Examples from the conversation memory
 def examples
   @memory.examples
 end
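Taken together, these changes mean a conversation now deals in message objects end to end. A sketch of the resulting flow, assuming an OpenAI client as in the README:

```ruby
llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
chat = Langchain::Conversation.new(llm: llm)

# Stored internally as a SystemMessage after this change.
chat.set_context("You are RubyGPT, a helpful chat bot for helping people learn Ruby")

response = chat.message("What is an Enumerator?")
response         # => a Langchain::AIMessage, no longer a raw String
response.content # => the reply text
```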
data/lib/langchain/conversation_memory.rb
CHANGED

@@ -25,12 +25,8 @@ module Langchain
   @examples.concat examples
 end

-def
-  @messages
-end
-
-def append_user_message(message)
-  @messages << {role: "user", content: message}
+def append_message(message)
+  @messages.append(message)
 end

 def reduce_messages(exception)

@@ -47,7 +43,7 @@ module Langchain
 def context
   return if @context.nil? && @summary.nil?

-  [@context, @summary].compact.join("\n")
+  SystemMessage.new([@context, @summary].compact.join("\n"))
 end

 private
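A short sketch of the unified memory API after this change (the constructor arguments are assumptions for illustration):

```ruby
memory = Langchain::ConversationMemory.new(llm: llm)

# One append path for every message type, replacing the old
# role-specific append_user_message / append_ai_message helpers.
memory.append_message(Langchain::HumanMessage.new("What's 2 + 2?"))
memory.append_message(Langchain::AIMessage.new("4"))
```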
data/lib/langchain/llm/cohere.rb
CHANGED
@@ -5,7 +5,7 @@ module Langchain::LLM
 # Wrapper around the Cohere API.
 #
 # Gem requirements:
-#     gem "cohere-ruby", "~> 0.9.
+#     gem "cohere-ruby", "~> 0.9.6"
 #
 # Usage:
 #     cohere = Langchain::LLM::Cohere.new(api_key: "YOUR_API_KEY")

@@ -70,7 +70,8 @@
 # Cohere does not have a dedicated chat endpoint, so instead we call `complete()`
 def chat(...)
-  complete(...)
+  response_text = complete(...)
+  Langchain::AIMessage.new(response_text)
 end

 # Generate a summary in English for a given text
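After this change, `chat` wraps the Cohere completion text in an `AIMessage`, matching the other LLM adapters. A usage sketch:

```ruby
cohere = Langchain::LLM::Cohere.new(api_key: ENV["COHERE_API_KEY"])

reply = cohere.chat(prompt: "Say hello")
reply.to_s # the underlying completion text, via Message#to_s
```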
data/lib/langchain/llm/google_palm.rb
CHANGED

@@ -5,7 +5,7 @@ module Langchain::LLM
 # Wrapper around the Google PaLM (Pathways Language Model) APIs: https://ai.google/build/machine-learning/
 #
 # Gem requirements:
-#     gem "google_palm_api", "~> 0.1.
+#     gem "google_palm_api", "~> 0.1.3"
 #
 # Usage:
 #     google_palm = Langchain::LLM::GooglePalm.new(api_key: "YOUR_API_KEY")

@@ -19,6 +19,9 @@
   embeddings_model_name: "embedding-gecko-001"
 }.freeze
 LENGTH_VALIDATOR = Langchain::Utils::TokenLength::GooglePalmValidator
+ROLE_MAPPING = {
+  "human" => "user"
+}

 def initialize(api_key:, default_options: {})
   depends_on "google_palm_api"

@@ -72,10 +75,12 @@
 #
 # Generate a chat completion for a given prompt
 #
-# @param prompt [
-# @param messages [Array] The messages that have been sent in the conversation
-# @param
-# @
+# @param prompt [HumanMessage] The prompt to generate a chat completion for
+# @param messages [Array<AIMessage|HumanMessage>] The messages that have been sent in the conversation
+# @param context [SystemMessage] An initial context to provide as a system message, ie "You are RubyGPT, a helpful chat bot for helping people learn Ruby"
+# @param examples [Array<AIMessage|HumanMessage>] Examples of messages to provide to the model. Useful for Few-Shot Prompting
+# @param options [Hash] extra parameters passed to GooglePalmAPI::Client#generate_chat_message
+# @return [AIMessage] The chat completion
 #
 def chat(prompt: "", messages: [], context: "", examples: [], **options)
   raise ArgumentError.new(":prompt or :messages argument is expected") if prompt.empty? && messages.empty?

@@ -83,7 +88,7 @@
 default_params = {
   temperature: @defaults[:temperature],
   model: @defaults[:chat_completion_model_name],
-  context: context,
+  context: context.to_s,
   messages: compose_chat_messages(prompt: prompt, messages: messages),
   examples: compose_examples(examples)
 }

@@ -104,7 +109,7 @@
 response = client.generate_chat_message(**default_params)
 raise "GooglePalm API returned an error: #{response}" if response.dig("error")

-response.dig("candidates", 0, "content")
+Langchain::AIMessage.new(response.dig("candidates", 0, "content"))
 end

 #

@@ -146,8 +151,8 @@
 def compose_examples(examples)
   examples.each_slice(2).map do |example|
     {
-      input: {content: example.first
-      output: {content: example.last
+      input: {content: example.first.content},
+      output: {content: example.last.content}
     }
   end
 end

@@ -155,8 +160,8 @@
 def transform_messages(messages)
   messages.map do |message|
     {
-      author: message
-      content: message
+      author: ROLE_MAPPING.fetch(message.type, message.type),
+      content: message.content
     }
   end
 end
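The new `ROLE_MAPPING` exists because the PaLM API labels messages by author (`user`), while Langchain's message objects report a `type` (assumed here to be `"human"` for `HumanMessage` and `"ai"` for `AIMessage`, consistent with the mapping keys). An illustration of the transform:

```ruby
messages = [Langchain::HumanMessage.new("Hi"), Langchain::AIMessage.new("Hello!")]

messages.map { |m| {author: ROLE_MAPPING.fetch(m.type, m.type), content: m.content} }
# => [{author: "user", content: "Hi"}, {author: "ai", content: "Hello!"}]
```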
data/lib/langchain/llm/llama_cpp.rb
CHANGED

@@ -33,8 +33,8 @@ module Langchain::LLM
   @seed = seed
 end

-# @
-# @
+# @param text [String] The text to embed
+# @param n_threads [Integer] The number of CPU threads to use
 # @return [Array] The embedding
 def embed(text:, n_threads: nil)
   # contexts are kinda stateful when it comes to embeddings, so allocate one each time

@@ -49,9 +49,9 @@
   context.embeddings
 end

-# @
-# @
-# @
+# @param prompt [String] The prompt to complete
+# @param n_predict [Integer] The number of tokens to predict
+# @param n_threads [Integer] The number of CPU threads to use
 # @return [String] The completed prompt
 def complete(prompt:, n_predict: 128, n_threads: nil)
   n_threads ||= self.n_threads
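A hypothetical usage of the newly documented parameters (the constructor argument is an assumption based on a typical llama.cpp setup):

```ruby
llama = Langchain::LLM::LlamaCpp.new(model_path: ENV["LLAMA_MODEL_PATH"])

llama.complete(prompt: "Once upon a time", n_predict: 64, n_threads: 4)
llama.embed(text: "Once upon a time", n_threads: 4)
```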
data/lib/langchain/llm/openai.rb
CHANGED
@@ -18,8 +18,12 @@ module Langchain::LLM
   dimension: 1536
 }.freeze
 LENGTH_VALIDATOR = Langchain::Utils::TokenLength::OpenAIValidator
+ROLE_MAPPING = {
+  "ai" => "assistant",
+  "human" => "user"
+}

-attr_accessor :functions
+attr_accessor :functions

 def initialize(api_key:, llm_options: {}, default_options: {})
   depends_on "ruby-openai"

@@ -98,19 +102,13 @@
 # },
 # ]
 #
-# @param prompt [
-# @param messages [Array<
-#
-#
-#
-# @
-# @
-# Each message should be a Hash with the following keys:
-# - :content [String] The content of the message
-# - :role [String] The role of the sender (system, user, assistant, or function)
-# @param options <Hash> extra parameters passed to OpenAI::Client#chat
-# @yield [String] Stream responses back one String at a time
-# @return [String] The chat completion
+# @param prompt [HumanMessage] The prompt to generate a chat completion for
+# @param messages [Array<AIMessage|HumanMessage>] The messages that have been sent in the conversation
+# @param context [SystemMessage] An initial context to provide as a system message, ie "You are RubyGPT, a helpful chat bot for helping people learn Ruby"
+# @param examples [Array<AIMessage|HumanMessage>] Examples of messages to provide to the model. Useful for Few-Shot Prompting
+# @param options [Hash] extra parameters passed to OpenAI::Client#chat
+# @yield [AIMessage] Stream responses back one String at a time
+# @return [AIMessage] The chat completion
 #
 def chat(prompt: "", messages: [], context: "", examples: [], **options)
   raise ArgumentError.new(":prompt or :messages argument is expected") if prompt.empty? && messages.empty?

@@ -126,16 +124,20 @@
 if (streaming = block_given?)
   parameters[:stream] = proc do |chunk, _bytesize|
-
-
+    delta = chunk.dig("choices", 0, "delta")
+    content = delta["content"]
+    additional_kwargs = {function_call: delta["function_call"]}.compact
+    yield Langchain::AIMessage.new(content, additional_kwargs)
   end
 end

 response = client.chat(parameters: parameters)
 raise Langchain::LLM::ApiError.new "Chat completion failed: #{response.dig("error", "message")}" if !response.empty? && response.dig("error")
 unless streaming
-
-
+  message = response.dig("choices", 0, "message")
+  content = message["content"]
+  additional_kwargs = {function_call: message["function_call"]}.compact
+  Langchain::AIMessage.new(content.to_s, additional_kwargs)
 end
 end

@@ -171,9 +173,9 @@
 history.concat transform_messages(messages) unless messages.empty?

-unless context.nil? || context.empty?
+unless context.nil? || context.to_s.empty?
   history.reject! { |message| message[:role] == "system" }
-  history.prepend({role: "system", content: context})
+  history.prepend({role: "system", content: context.content})
 end

 unless prompt.empty?

@@ -189,12 +191,9 @@
 def transform_messages(messages)
   messages.map do |message|
-    role = message[:role] || message["role"]
-    content = message[:content] || message["content"]
-
     {
-
-
+      role: ROLE_MAPPING.fetch(message.type, message.type),
+      content: message.content
     }
   end
 end
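The practical effect of the OpenAI changes: both streamed chunks and the final return value are now `AIMessage` objects, with any `function_call` carried in `additional_kwargs`. A streaming sketch:

```ruby
openai = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])

openai.chat(prompt: "Write a haiku about Ruby") do |chunk|
  # Each chunk is an AIMessage wrapping one streamed delta.
  print chunk.to_s
end
```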
data/lib/langchain/loader.rb
CHANGED
@@ -98,8 +98,8 @@ module Langchain
 Dir.glob(File.join(@path, "**/*")).map do |file|
   # Only load and add to result files with supported extensions
   Langchain::Loader.new(file, @options).load(&block)
-rescue
-  UnknownFormatError
+rescue => e
+  UnknownFormatError.new(e)
 end.flatten.compact
 end

@@ -134,6 +134,7 @@
 end

 def source_type
+  binding.pry
   url? ? @raw_data.content_type : File.extname(@path)
 end
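A minimal usage sketch (the path is illustrative). Note that the second hunk introduces a stray `binding.pry` into `source_type`, so in this release any load that reaches it will open a Pry session, or fail if Pry is not loaded:

```ruby
# Loads every supported file under a directory; per the rescue above,
# entries that fail to load become UnknownFormatError values in the
# result instead of raising.
data = Langchain::Loader.new("docs/").load
```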
data/lib/langchain/message.rb
ADDED

@@ -0,0 +1,35 @@
+# frozen_string_literal: true
+
+module Langchain
+  class Message
+    attr_reader :content, :additional_kwargs
+
+    def initialize(content, additional_kwargs = nil)
+      @content = content
+      @additional_kwargs = additional_kwargs
+    end
+
+    def type
+      raise NotImplementedError
+    end
+
+    def to_s
+      content
+    end
+
+    def ==(other)
+      to_json == other.to_json
+    end
+
+    def to_json(options = {})
+      hash = {
+        type: type,
+        content: content
+      }
+
+      hash[:additional_kwargs] = additional_kwargs unless additional_kwargs.nil? || additional_kwargs.empty?
+
+      hash.to_json
+    end
+  end
+end
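A sketch of the new base class in use through one of the subclasses added in this release (`HumanMessage#type` is assumed to return `"human"`, matching the role mappings above):

```ruby
msg = Langchain::HumanMessage.new("Hello", {user_id: 42})

msg.to_s    # => "Hello"
msg.to_json # => {"type":"human","content":"Hello","additional_kwargs":{"user_id":42}}
```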
data/lib/langchain/output_parsers/base.rb
CHANGED

@@ -9,7 +9,8 @@ module Langchain::OutputParsers
 # Parse the output of an LLM call.
 #
 # @param text - LLM output to parse.
-#
+#
+# @return [Object] Parsed output.
 #
 def parse(text:)
   raise NotImplementedError

@@ -18,9 +19,9 @@
 #
 # Return a string describing the format of the output.
 #
-# @
-#
-# @example
+# @return [String] Format instructions.
+#
+# @example returns the format instructions
 # ```json
 # {
 #   "foo": "bar"
data/lib/langchain/output_parsers/{fix.rb → output_fixing_parser.rb}
CHANGED

@@ -65,7 +65,9 @@ module Langchain::OutputParsers
 #
 # Creates a new instance of the class using the given JSON::Schema.
 #
-# @param
+# @param llm [Langchain::LLM] The LLM used in the fixing process
+# @param parser [Langchain::OutputParsers] The parser originally used which resulted in parsing error
+# @param prompt [Langchain::Prompt::PromptTemplate]
 #
 # @return [Object] A new instance of the class
 #
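A hedged sketch of the documented constructor (the `from_llm` method name is an assumption; `parser` would be a previously built parser that raised on malformed output):

```ruby
fixing_parser = Langchain::OutputParsers::OutputFixingParser.from_llm(
  llm: Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"]),
  parser: parser
)

# Re-parses the malformed LLM output, asking the LLM to fix it first.
fixing_parser.parse(text: malformed_output)
```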