deepagents 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.rspec +3 -0
- data/LICENSE +21 -0
- data/README.md +237 -0
- data/Rakefile +17 -0
- data/examples/langchain_integration.rb +58 -0
- data/examples/research_agent.rb +180 -0
- data/lib/deepagents/deepagentsrb/errors.rb +80 -0
- data/lib/deepagents/deepagentsrb/graph.rb +213 -0
- data/lib/deepagents/deepagentsrb/models.rb +167 -0
- data/lib/deepagents/deepagentsrb/state.rb +139 -0
- data/lib/deepagents/deepagentsrb/sub_agent.rb +125 -0
- data/lib/deepagents/deepagentsrb/tools.rb +205 -0
- data/lib/deepagents/deepagentsrb/version.rb +3 -0
- data/lib/deepagents/errors.rb +80 -0
- data/lib/deepagents/graph.rb +207 -0
- data/lib/deepagents/models.rb +217 -0
- data/lib/deepagents/state.rb +139 -0
- data/lib/deepagents/sub_agent.rb +130 -0
- data/lib/deepagents/tools.rb +152 -0
- data/lib/deepagents/version.rb +3 -0
- data/lib/deepagents.rb +61 -0
- metadata +150 -0
@@ -0,0 +1,213 @@
|
|
1
|
+
require_relative 'tools'
require_relative 'state'
require_relative 'sub_agent'
require_relative 'models'

require 'json'
require 'securerandom'
|
6
|
+
|
7
|
+
module DeepAgentsRb
|
8
|
+
module Graph
  # Prompt fragment appended to every agent's instructions; documents the
  # built-in `write_todos` and `task` tools for the model.
  BASE_PROMPT = <<~PROMPT
    You have access to a number of standard tools

    ## `write_todos`

    You have access to the `write_todos` tools to help you manage and plan tasks. Use these tools VERY frequently to ensure that you are tracking your tasks and giving the user visibility into your progress.
    These tools are also EXTREMELY helpful for planning tasks, and for breaking down larger complex tasks into smaller steps. If you do not use this tool when planning, you may forget to do important tasks - and that is unacceptable.

    It is critical that you mark todos as completed as soon as you are done with a task. Do not batch up multiple tasks before marking them as completed.
    ## `task`

    - When doing web search, prefer to use the `task` tool in order to reduce context usage.
  PROMPT

  # Returns the default model adapter (delegates to Models.get_default_model).
  def self.get_default_model
    Models.get_default_model
  end

  # Builds a DeepAgent wired up with the built-in tools, a `task` tool for
  # delegating to sub-agents, and the combined system prompt.
  #
  # tools         - caller-supplied tool objects
  # instructions  - agent-specific system instructions (BASE_PROMPT is appended)
  # model:        - model adapter; defaults to Models.get_default_model
  # subagents:    - SubAgent instances or hashes describing them
  # state_schema: - state class; defaults to DeepAgentState
  def self.create_deep_agent(tools, instructions, model: nil, subagents: nil, state_schema: nil)
    full_prompt = instructions + BASE_PROMPT
    standard_tools = Tools.built_in_tools
    chosen_model = model || get_default_model
    schema = state_schema || DeepAgentState

    # Accept either SubAgent objects or plain hashes describing them.
    normalized_subagents = (subagents || []).map do |agent|
      agent.is_a?(Hash) ? SubAgent.from_h(agent) : agent
    end

    # The `task` tool lets the main agent delegate work to sub-agents.
    delegation_tool = SubAgentSystem.create_task_tool(
      tools + standard_tools,
      instructions,
      normalized_subagents,
      chosen_model,
      schema
    )

    all_tools = standard_tools + tools + [delegation_tool]
    DeepAgent.new(chosen_model, full_prompt, all_tools, schema)
  end
end
|
68
|
+
|
69
|
+
# Deep Agent class
|
70
|
+
# Orchestrates a simple text-based agent loop: the model is prompted, its
# response is scanned for fenced ```tool_call``` JSON blocks, matching tools
# are executed, and their results are fed back into the conversation state.
class DeepAgent
  attr_reader :model, :prompt, :tools, :state_class

  # model       - adapter responding to #generate(prompt, messages)
  # prompt      - system prompt string
  # tools       - tool objects responding to #name and #call(**kwargs)
  # state_class - state class instantiated per invocation (e.g. DeepAgentState)
  def initialize(model, prompt, tools, state_class)
    @model = model
    @prompt = prompt
    @tools = tools
    @state_class = state_class
  end

  # Runs the agent on an input hash (symbol keys :messages and/or :files)
  # and returns the final state object.
  def invoke(input)
    state = @state_class.new
    state.messages = input[:messages] if input[:messages]
    state.files = input[:files] if input[:files]
    run_agent_loop(state)
  end

  # Drives the generate -> parse -> execute cycle until the model stops
  # requesting tools or max_iterations is reached (guards against infinite
  # loops). max_iterations keeps the historical default of 10 but is now
  # configurable. Returns the state.
  def run_agent_loop(state, max_iterations: 10)
    iterations = 0

    while iterations < max_iterations
      iterations += 1

      response = @model.generate(@prompt, state.messages)
      tool_calls = parse_tool_calls(response)

      # Record the assistant turn regardless of whether tools were requested.
      state.update(messages: [{ role: "assistant", content: response }])
      break if tool_calls.empty?

      tool_results = []
      tool_calls.each do |tool_call|
        result = execute_tool_call(tool_call, state)
        tool_results << result

        if result.is_a?(Tools::Command)
          # A Command carries a state update (files/messages/todos) directly.
          state.update(result.update)
        else
          # Anything else is surfaced as a plain tool message.
          state.update(messages: [{
            role: "tool",
            content: result.to_s,
            tool_call_id: tool_call[:id]
          }])
        end
      end

      break unless should_continue?(response, tool_results)
    end

    state
  end

  private

  # Extracts tool calls from a model response. Expected format:
  #
  #   ```tool_call
  #   {"name": "tool_name", "arguments": {"arg1": "value1", ...}}
  #   ```
  #
  # Malformed JSON payloads are skipped; each call gets a fresh UUID id.
  def parse_tool_calls(response)
    tool_calls = []
    tool_call_pattern = /```tool_call\s+({[\s\S]*?})\s+```/m

    response.scan(tool_call_pattern).each do |match|
      begin
        tool_data = JSON.parse(match[0])
        tool_calls << {
          id: SecureRandom.uuid,
          name: tool_data["name"],
          arguments: tool_data["arguments"]
        }
      rescue JSON::ParserError
        # Skip invalid JSON rather than aborting the whole turn.
      end
    end

    tool_calls
  end

  # Looks up and executes a single tool call, passing the current state and
  # the call id alongside the model-supplied arguments. Returns the tool's
  # result, or an error string when the tool is unknown or raises.
  def execute_tool_call(tool_call, state)
    tool_name = tool_call[:name]
    tool = @tools.find { |t| t.name == tool_name }
    return "Error: Tool '#{tool_name}' not found" if tool.nil?

    begin
      # Guard against a missing "arguments" key in the model's JSON, which
      # previously raised NoMethodError on nil.
      kwargs = (tool_call[:arguments] || {}).transform_keys(&:to_sym)
      kwargs[:state] = state
      kwargs[:tool_call_id] = tool_call[:id]

      tool.call(**kwargs)
    rescue StandardError => e
      # Rescue any tool failure (the original caught only ArgumentError) so
      # a single bad call cannot abort the entire agent loop.
      "Error executing tool '#{tool_name}': #{e.message}"
    end
  end

  # Continue looping as long as at least one tool was executed this turn.
  def should_continue?(response, tool_results)
    !tool_results.empty?
  end
end
|
213
|
+
end
|
@@ -0,0 +1,167 @@
|
|
1
|
+
module DeepAgentsRb
|
2
|
+
module Models
|
3
|
+
# Base model interface that all model adapters should implement
|
4
|
+
# Abstract interface every model adapter implements.
class BaseModel
  # Produce a completion for the given system prompt and message history.
  # Concrete adapters must override this.
  def generate(prompt, messages)
    raise NotImplementedError, "Subclasses must implement the generate method"
  end

  # Streaming variant. The default is a non-streaming fallback: it computes
  # the full completion via #generate, yields it once to the block (when one
  # is supplied), and returns it. Adapters with real streaming override this.
  def stream_generate(prompt, messages, &block)
    text = generate(prompt, messages)
    block.call(text) if block
    text
  end
end
|
17
|
+
|
18
|
+
# Anthropic Claude model adapter
|
19
|
+
# Adapter for Anthropic's Claude Messages API (via the `anthropic` gem).
class Claude < BaseModel
  # api_key: falls back to ENV['ANTHROPIC_API_KEY'].
  # model:   Anthropic model id to request.
  # client:  allows injecting a preconfigured or mock client.
  def initialize(api_key: nil, model: "claude-3-sonnet-20240229", client: nil)
    # Lazy require so the library loads even when the gem is not installed.
    require 'anthropic'
    @api_key = api_key || ENV['ANTHROPIC_API_KEY']
    @model = model
    @client = client || Anthropic::Client.new(api_key: @api_key)
  end

  # Single-shot completion: sends the message history with `prompt` as the
  # system message and returns the text of the first content block.
  def generate(prompt, messages)
    response = @client.messages(
      model: @model,
      max_tokens: 4096,
      messages: messages,
      system: prompt
    )
    response.content[0].text
  end

  # Streaming completion: yields each text delta to the block (if given)
  # and returns the concatenated full text.
  def stream_generate(prompt, messages, &block)
    response = @client.messages(
      model: @model,
      max_tokens: 4096,
      messages: messages,
      system: prompt,
      stream: true
    )

    full_text = ""
    response.each do |chunk|
      # NOTE(review): the Anthropic streaming API documents text deltas with
      # delta type "text_delta"; confirm that `chunk.delta.type == "text"`
      # actually matches the gem's chunk objects — if not, no text is ever
      # accumulated here.
      if chunk.type == "content_block_delta" && chunk.delta.type == "text"
        full_text += chunk.delta.text
        yield chunk.delta.text if block_given?
      end
    end

    full_text
  end
end
|
57
|
+
|
58
|
+
# OpenAI model adapter
|
59
|
+
# Adapter for the OpenAI chat completions API (via the `openai` gem).
class OpenAI < BaseModel
  # api_key: falls back to ENV['OPENAI_API_KEY'].
  # model:   OpenAI model id to request.
  # client:  allows injecting a preconfigured or mock client.
  def initialize(api_key: nil, model: "gpt-4o", client: nil)
    # Lazy require so the library loads even when the gem is not installed.
    require 'openai'
    @api_key = api_key || ENV['OPENAI_API_KEY']
    @model = model
    @client = client || ::OpenAI::Client.new(access_token: @api_key)
  end

  # Single-shot completion. The system prompt is prepended as a "system"
  # message; returns the content string of the first choice.
  def generate(prompt, messages)
    # Normalize hash messages to symbol keys; non-hash entries pass through.
    openai_messages = messages.map do |msg|
      msg = msg.transform_keys(&:to_sym) if msg.is_a?(Hash)
      msg
    end

    # Prepend the system prompt unless it is blank.
    openai_messages.unshift({ role: "system", content: prompt }) unless prompt.nil? || prompt.empty?

    response = @client.chat(
      parameters: {
        model: @model,
        messages: openai_messages
      }
    )

    response.dig("choices", 0, "message", "content")
  end

  # Streaming completion: yields each content delta to the block (if given)
  # and returns the concatenated full text.
  def stream_generate(prompt, messages, &block)
    # Normalize hash messages to symbol keys; non-hash entries pass through.
    openai_messages = messages.map do |msg|
      msg = msg.transform_keys(&:to_sym) if msg.is_a?(Hash)
      msg
    end

    # Prepend the system prompt unless it is blank.
    openai_messages.unshift({ role: "system", content: prompt }) unless prompt.nil? || prompt.empty?

    full_text = ""
    @client.chat(
      parameters: {
        model: @model,
        messages: openai_messages,
        stream: true
      }
    ) do |chunk|
      content = chunk.dig("choices", 0, "delta", "content")
      if content
        full_text += content
        yield content if block_given?
      end
    end

    full_text
  end
end
|
115
|
+
|
116
|
+
# Mock model for testing
|
117
|
+
# Deterministic stand-in model for tests: returns a canned reply when the
# most recent user message contains a configured substring, otherwise a
# fixed default reply.
class MockModel < BaseModel
  # responses - hash of { substring => canned reply }.
  def initialize(responses = {})
    @responses = responses
    @default_response = "I'll help you with that task."
  end

  # Returns the first canned reply whose key appears in the latest user
  # message's content, or the default reply when nothing matches.
  def generate(prompt, messages)
    text = latest_user_content(messages)
    if text
      matched = @responses.find { |key, _reply| text.include?(key) }
      return matched[1] if matched
    end
    @default_response
  end

  private

  # Content of the most recent user-role message (symbol or string keys),
  # or nil when there is none.
  def latest_user_content(messages)
    msg = messages.reverse_each.find { |m| m[:role] == "user" || m["role"] == "user" }
    msg && (msg[:content] || msg["content"])
  end
end
|
140
|
+
|
141
|
+
# Get the default model (used when no model is specified)
|
142
|
+
def self.get_default_model
  # Preference order: Claude, then OpenAI, then the offline MockModel.
  # A provider is only chosen when its API key is set AND its gem loads.
  [
    ['ANTHROPIC_API_KEY', 'anthropic', Claude],
    ['OPENAI_API_KEY', 'openai', OpenAI]
  ].each do |env_key, gem_name, adapter|
    next unless ENV[env_key]
    begin
      require gem_name
      return adapter.new
    rescue LoadError
      # Gem not installed; fall through to the next candidate.
    end
  end

  # No provider available: fall back to the deterministic mock.
  MockModel.new
end
|
166
|
+
end
|
167
|
+
end
|
@@ -0,0 +1,139 @@
|
|
1
|
+
require_relative 'errors'
|
2
|
+
|
3
|
+
module DeepAgentsRb
|
4
|
+
# Todo class for managing todos
|
5
|
+
# A single todo item: free-text content plus a lifecycle status that is
# normalized to lowercase and restricted to pending/in_progress/completed.
class Todo
  attr_accessor :content, :status

  # Raises ArgumentError when content is nil/empty or status is unknown.
  def initialize(content, status = "pending")
    raise ArgumentError, "Todo content cannot be nil or empty" if content.nil? || content.empty?
    @content = content
    @status = validate_status(status)
  end

  # Hash form with symbol keys, suitable for serialization.
  def to_h
    { content: @content, status: @status }
  end

  # Builds a Todo from a hash with symbol or string keys; status defaults
  # to "pending". Raises ArgumentError when content is missing.
  def self.from_h(hash)
    content = hash[:content] || hash["content"]
    status = hash[:status] || hash["status"] || "pending"
    raise ArgumentError, "Todo hash must contain content" if content.nil?
    new(content, status)
  end

  private

  # Normalizes status to a lowercase string and rejects unknown values.
  def validate_status(status)
    normalized = status.to_s.downcase
    allowed = %w[pending in_progress completed]
    unless allowed.include?(normalized)
      raise ArgumentError, "Invalid todo status: #{normalized}. Must be one of: #{allowed.join(', ')}"
    end
    normalized
  end
end
|
42
|
+
|
43
|
+
# Deep agent state class
|
44
|
+
# Mutable agent state: a todo list, a virtual file map (name => content),
# and the running conversation message history.
class DeepAgentState
  attr_reader :todos, :files, :messages

  def initialize
    @todos = []
    @files = {}
    @messages = []
  end

  # Replaces the todo list; entries may be Todo objects or hashes.
  def todos=(value)
    @todos = validate_todos(value)
  end

  # Replaces the file map. Raises ArgumentError unless given a Hash.
  def files=(value)
    raise ArgumentError, "Files must be a hash" unless value.is_a?(Hash)
    @files = value
  end

  # Replaces the message history. Raises ArgumentError unless given an Array.
  def messages=(value)
    raise ArgumentError, "Messages must be an array" unless value.is_a?(Array)
    @messages = value
  end

  # Reads a state field by name (symbol or string); unknown keys return
  # the supplied default.
  def get(key, default = nil)
    case key.to_s
    when "todos" then @todos
    when "files" then @files
    when "messages" then @messages
    else default
    end
  end

  # Applies a partial update: todos are replaced (after validation), files
  # are merged into the existing map, and messages are appended. Any
  # failure is re-raised as a StateError.
  def update(todos: nil, files: nil, messages: nil)
    @todos = validate_todos(todos) if todos

    if files
      raise ArgumentError, "Files must be a hash" unless files.is_a?(Hash)
      @files.merge!(files)
    end

    if messages
      raise ArgumentError, "Messages must be an array" unless messages.is_a?(Array)
      @messages += messages
    end
  rescue => e
    raise StateError, "Failed to update state: #{e.message}"
  end

  # Hash-style read access; unknown keys return nil.
  def [](key)
    get(key)
  end

  # Hash-style write access; raises ArgumentError for unknown keys.
  def []=(key, value)
    case key.to_s
    when "todos" then self.todos = value
    when "files" then self.files = value
    when "messages" then self.messages = value
    else raise ArgumentError, "Unknown state key: #{key}"
    end
  end

  # Serializable hash form (todos converted via Todo#to_h).
  def to_h
    { todos: @todos.map(&:to_h), files: @files, messages: @messages }
  end

  private

  # Coerces an array of Todo-or-hash entries into Todo objects.
  def validate_todos(todos)
    raise ArgumentError, "Todos must be an array" unless todos.is_a?(Array)
    todos.map { |item| item.is_a?(Todo) ? item : Todo.from_h(item) }
  end
end
|
139
|
+
end
|
@@ -0,0 +1,125 @@
|
|
1
|
+
module DeepAgentsRb
|
2
|
+
# SubAgent class to define sub-agents
|
3
|
+
# Declarative description of a sub-agent: a name, a human-readable
# description (surfaced in the task tool's documentation), a system prompt,
# and an optional whitelist of tool names.
class SubAgent
  attr_reader :name, :description, :prompt, :tools

  def initialize(name:, description:, prompt:, tools: nil)
    @name = name
    @description = description
    @prompt = prompt
    @tools = tools
  end

  # Hash form with symbol keys; :tools appears only when it was supplied.
  def to_h
    base = { name: @name, description: @description, prompt: @prompt }
    @tools ? base.merge(tools: @tools) : base
  end

  # Inverse of #to_h.
  def self.from_h(hash)
    new(
      name: hash[:name],
      description: hash[:description],
      prompt: hash[:prompt],
      tools: hash[:tools]
    )
  end
end
|
32
|
+
|
33
|
+
module SubAgentSystem
  # Description shown to the model for the `task` tool; {other_agents} is
  # replaced with one bullet per registered sub-agent.
  TASK_DESCRIPTION_PREFIX = "Use this tool to delegate a task to a sub-agent. Available sub-agents:\n- general-purpose: A general purpose agent with the same capabilities as the main agent\n{other_agents}"
  TASK_DESCRIPTION_SUFFIX = "\n\nThe sub-agent will have access to the same tools as you do, and will be able to use them to complete the task."

  # Builds the `task` tool used to delegate work to sub-agents.
  #
  # tools        - full tool set available to delegation
  # instructions - prompt for the implicit "general-purpose" agent
  # subagents    - SubAgent instances to register alongside it
  # model        - model adapter shared by all sub-agents
  # state_class  - state class handed to each ReactAgent
  def self.create_task_tool(tools, instructions, subagents, model, state_class)
    # Always provide a general-purpose agent mirroring the main agent.
    agents = {
      "general-purpose" => ReactAgent.new(model, instructions, tools, state_class)
    }

    tools_by_name = {}
    tools.each { |tool| tools_by_name[tool.name] = tool }

    # Each sub-agent either restricts itself to a named subset of tools or
    # inherits the full tool set.
    subagents.each do |agent|
      agent_tools = agent.tools ? agent.tools.map { |t| tools_by_name[t] } : tools
      agents[agent.name] = ReactAgent.new(model, agent.prompt, agent_tools, state_class)
    end

    other_agents_string = subagents.map { |agent| "- #{agent.name}: #{agent.description}" }

    Tools::Tool.new(
      "task",
      TASK_DESCRIPTION_PREFIX.gsub("{other_agents}", other_agents_string.join("\n")) + TASK_DESCRIPTION_SUFFIX
    ) do |description, subagent_type, state, tool_call_id:|
      if !agents.key?(subagent_type)
        # BUGFIX: use `next` (not `return`) to exit the block. A `return`
        # here raises LocalJumpError when the tool is invoked after
        # create_task_tool has already returned. Also join the allowed
        # types into readable prose instead of interpolating the Array.
        next "Error: invoked agent of type #{subagent_type}, the only allowed types are #{agents.keys.map { |k| "`#{k}`" }.join(', ')}"
      end

      sub_agent = agents[subagent_type]
      # Run the sub-agent against a copy of the state seeded with only the
      # delegated task description.
      sub_state = state.dup
      sub_state.messages = [{ role: "user", content: description }]

      result = sub_agent.invoke(sub_state)

      # Propagate any file changes and surface the sub-agent's final reply
      # as a tool message tied to this call.
      Tools::Command.new(
        update: {
          files: result.get("files", {}),
          messages: [
            Tools::ToolMessage.new(
              result.messages.last[:content],
              tool_call_id: tool_call_id
            )
          ]
        }
      )
    end
  end
end
|
96
|
+
|
97
|
+
# Simple React Agent implementation
|
98
|
+
# Minimal placeholder agent used for sub-agent delegation. A real
# implementation would run a full reason/act loop against the model and
# tools; this stub simply echoes the most recent message back as a
# completed-task reply.
class ReactAgent
  attr_reader :model, :prompt, :tools, :state_class

  def initialize(model, prompt, tools, state_class)
    @model = model
    @prompt = prompt
    @tools = tools
    @state_class = state_class
  end

  # Appends a canned completion message referencing the last message in the
  # state, then returns the (mutated) state.
  def invoke(state)
    last_content = state.messages.last[:content]
    reply = { role: "assistant", content: "I've completed the task: #{last_content}" }
    state.update(messages: [reply])
    state
  end
end
|
125
|
+
end
|