ai-agents 0.4.3 → 0.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.rubocop.yml +7 -9
- data/CHANGELOG.md +27 -0
- data/CLAUDE.md +1 -3
- data/docs/concepts/callbacks.md +1 -1
- data/docs/guides/multi-agent-systems.md +13 -13
- data/docs/guides/rails-integration.md +56 -56
- data/docs/guides/request-headers.md +91 -0
- data/docs/guides/structured-output.md +2 -2
- data/docs/guides.md +1 -0
- data/docs/index.md +1 -1
- data/examples/isp-support/agents_factory.rb +33 -32
- data/examples/isp-support/interactive.rb +72 -32
- data/lib/agents/agent.rb +7 -4
- data/lib/agents/agent_runner.rb +3 -2
- data/lib/agents/handoff.rb +13 -5
- data/lib/agents/helpers/headers.rb +29 -0
- data/lib/agents/helpers/message_extractor.rb +88 -0
- data/lib/agents/helpers.rb +9 -0
- data/lib/agents/runner.rb +115 -45
- data/lib/agents/version.rb +1 -1
- data/lib/agents.rb +1 -2
- metadata +7 -5
- data/lib/agents/chat.rb +0 -161
- data/lib/agents/message_extractor.rb +0 -97
data/lib/agents/runner.rb
CHANGED
@@ -1,7 +1,5 @@
 # frozen_string_literal: true
 
-require_relative "message_extractor"
-
 module Agents
   # The execution engine that orchestrates conversations between users and agents.
   # Runner manages the conversation flow, handles tool execution through RubyLLM,
@@ -55,6 +53,7 @@ module Agents
     DEFAULT_MAX_TURNS = 10
 
     class MaxTurnsExceeded < StandardError; end
+    class AgentNotFoundError < StandardError; end
 
     # Create a thread-safe agent runner for multi-agent conversations.
     # The first agent becomes the default entry point for new conversations.
@@ -79,9 +78,10 @@
     # @param context [Hash] Shared context data accessible to all tools
     # @param registry [Hash] Registry of agents for handoff resolution
     # @param max_turns [Integer] Maximum conversation turns before stopping
+    # @param headers [Hash, nil] Custom HTTP headers passed to the underlying LLM provider
     # @param callbacks [Hash] Optional callbacks for real-time event notifications
     # @return [RunResult] The result containing output, messages, and usage
-    def run(starting_agent, input, context: {}, registry: {}, max_turns: DEFAULT_MAX_TURNS, callbacks: {})
+    def run(starting_agent, input, context: {}, registry: {}, max_turns: DEFAULT_MAX_TURNS, headers: nil, callbacks: {})
       # The starting_agent is already determined by AgentRunner based on conversation history
       current_agent = starting_agent
 
@@ -90,15 +90,22 @@ module Agents
       context_wrapper = RunContext.new(context_copy, callbacks: callbacks)
       current_turn = 0
 
+      runtime_headers = Helpers::Headers.normalize(headers)
+      agent_headers = Helpers::Headers.normalize(current_agent.headers)
+
       # Create chat and restore conversation history
-      chat =
+      chat = RubyLLM::Chat.new(model: current_agent.model)
+      current_headers = Helpers::Headers.merge(agent_headers, runtime_headers)
+      apply_headers(chat, current_headers)
+      configure_chat_for_agent(chat, current_agent, context_wrapper, replace: false)
       restore_conversation_history(chat, context_wrapper)
 
+
       loop do
         current_turn += 1
         raise MaxTurnsExceeded, "Exceeded maximum turns: #{max_turns}" if current_turn > max_turns
 
-        # Get response from LLM (
+        # Get response from LLM (RubyLLM handles tool execution with halting based handoff detection)
         result = if current_turn == 1
                    # Emit agent thinking event for initial message
                    context_wrapper.callback_manager.emit_agent_thinking(current_agent.name, input)
@@ -110,17 +117,23 @@
                  end
         response = result
 
-        # Check for handoff
-        if response.is_a?(
-
+        # Check for handoff via RubyLLM's halt mechanism
+        if response.is_a?(RubyLLM::Tool::Halt) && context_wrapper.context[:pending_handoff]
+          handoff_info = context_wrapper.context.delete(:pending_handoff)
+          next_agent = handoff_info[:target_agent]
 
           # Validate that the target agent is in our registry
           # This prevents handoffs to agents that weren't explicitly provided
           unless registry[next_agent.name]
-
-
-
-
+            save_conversation_state(chat, context_wrapper, current_agent)
+            error = AgentNotFoundError.new("Handoff failed: Agent '#{next_agent.name}' not found in registry")
+            return RunResult.new(
+              output: nil,
+              messages: Helpers::MessageExtractor.extract_messages(chat, current_agent),
+              usage: context_wrapper.usage,
+              context: context_wrapper.context,
+              error: error
+            )
           end
 
           # Save current conversation state before switching
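The handoff side of this exchange lives in data/lib/agents/handoff.rb (+13 -5, not shown here): the handoff tool records its target under :pending_handoff and ends the turn by returning a RubyLLM::Tool::Halt, which is what the branch above picks up. A rough sketch of that contract; the class body, the perform signature, and the Halt constructor are assumptions rather than the gem's actual source:

    # Illustrative only - the contract Runner#run expects from a handoff tool.
    class SketchHandoffTool < Agents::Tool
      def initialize(target_agent)
        @target_agent = target_agent
        super()
      end

      def perform(tool_context)
        # Stash the target agent so Runner#run can switch to it after the halt.
        tool_context.context[:pending_handoff] = { target_agent: @target_agent }
        # Returning a halt stops RubyLLM's auto-continuation and ends the turn.
        RubyLLM::Tool::Halt.new("Transferring you to #{@target_agent.name}.")
      end
    end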
@@ -133,9 +146,11 @@
           current_agent = next_agent
           context_wrapper.context[:current_agent] = next_agent.name
 
-          #
-          chat
-
+          # Reconfigure existing chat for new agent - preserves conversation history automatically
+          configure_chat_for_agent(chat, current_agent, context_wrapper, replace: true)
+          agent_headers = Helpers::Headers.normalize(current_agent.headers)
+          current_headers = Helpers::Headers.merge(agent_headers, runtime_headers)
+          apply_headers(chat, current_headers)
 
           # Force the new agent to respond to the conversation context
           # This ensures the user gets a response from the new agent
@@ -143,6 +158,17 @@
           next
         end
 
+        # Handle non-handoff halts - return the halt content as final response
+        if response.is_a?(RubyLLM::Tool::Halt)
+          save_conversation_state(chat, context_wrapper, current_agent)
+          return RunResult.new(
+            output: response.content,
+            messages: Helpers::MessageExtractor.extract_messages(chat, current_agent),
+            usage: context_wrapper.usage,
+            context: context_wrapper.context
+          )
+        end
+
         # If tools were called, continue the loop to let them execute
         next if response.tool_call?
 
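Because any halt that is not a handoff now becomes the final RunResult#output, a regular tool can deliberately end the run early. A small sketch under the same assumptions as above (tool name and wording are illustrative):

    # Illustrative only - a tool that short-circuits the run; its message surfaces as result.output.
    class EscalateToHumanTool < Agents::Tool
      description "Escalate the conversation to a human operator"

      def perform(_tool_context)
        RubyLLM::Tool::Halt.new("A human agent will follow up with you shortly.") # constructor assumed
      end
    end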
@@ -153,7 +179,7 @@
 
         return RunResult.new(
           output: response.content,
-          messages: MessageExtractor.extract_messages(chat, current_agent),
+          messages: Helpers::MessageExtractor.extract_messages(chat, current_agent),
           usage: context_wrapper.usage,
           context: context_wrapper.context
         )
@@ -164,7 +190,7 @@
 
       RunResult.new(
         output: "Conversation ended: #{e.message}",
-        messages: chat ? MessageExtractor.extract_messages(chat, current_agent) : [],
+        messages: chat ? Helpers::MessageExtractor.extract_messages(chat, current_agent) : [],
         usage: context_wrapper.usage,
         error: e,
         context: context_wrapper.context
@@ -175,7 +201,7 @@
 
       RunResult.new(
         output: nil,
-        messages: chat ? MessageExtractor.extract_messages(chat, current_agent) : [],
+        messages: chat ? Helpers::MessageExtractor.extract_messages(chat, current_agent) : [],
         usage: context_wrapper.usage,
         error: e,
         context: context_wrapper.context
@@ -184,6 +210,11 @@
 
     private
 
+    # Creates a deep copy of context data for thread safety.
+    # Preserves conversation history array structure while avoiding agent mutation.
+    #
+    # @param context [Hash] The context to copy
+    # @return [Hash] Thread-safe deep copy of the context
     def deep_copy_context(context)
       # Handle deep copying for thread safety
       context.dup.tap do |copied|
@@ -194,31 +225,40 @@ module Agents
       end
     end
 
+    # Restores conversation history from context into RubyLLM chat.
+    # Converts stored message hashes back into RubyLLM::Message objects with proper content handling.
+    #
+    # @param chat [RubyLLM::Chat] The chat instance to restore history into
+    # @param context_wrapper [RunContext] Context containing conversation history
     def restore_conversation_history(chat, context_wrapper)
       history = context_wrapper.context[:conversation_history] || []
 
       history.each do |msg|
         # Only restore user and assistant messages with content
         next unless %i[user assistant].include?(msg[:role].to_sym)
-        next unless msg[:content] && !MessageExtractor.content_empty?(msg[:content])
+        next unless msg[:content] && !Helpers::MessageExtractor.content_empty?(msg[:content])
 
-
+        # Extract text content safely - handle both string and hash content
+        content = RubyLLM::Content.new(msg[:content])
+
+        # Create a proper RubyLLM::Message and pass it to add_message
+        message = RubyLLM::Message.new(
           role: msg[:role].to_sym,
-          content:
+          content: content
         )
-
-        # Continue with partial history on error
-        puts "[Agents] Failed to restore message: #{e.message}"
+        chat.add_message(message)
       end
-    rescue StandardError => e
-      # If history restoration completely fails, continue with empty history
-      puts "[Agents] Failed to restore conversation history: #{e.message}"
-      context_wrapper.context[:conversation_history] = []
     end
 
+    # Saves current conversation state from RubyLLM chat back to context for persistence.
+    # Maintains conversation continuity across agent handoffs and process boundaries.
+    #
+    # @param chat [RubyLLM::Chat] The chat instance to extract state from
+    # @param context_wrapper [RunContext] Context to save state into
+    # @param current_agent [Agents::Agent] The currently active agent
     def save_conversation_state(chat, context_wrapper, current_agent)
       # Extract messages from chat
-      messages = MessageExtractor.extract_messages(chat, current_agent)
+      messages = Helpers::MessageExtractor.extract_messages(chat, current_agent)
 
       # Update context with latest state
       context_wrapper.context[:conversation_history] = messages
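save_conversation_state and restore_conversation_history round-trip the transcript through context[:conversation_history] as plain hashes, in the shape documented by the relocated MessageExtractor:

    # Persisted history shape (taken from the library's own MessageExtractor example):
    context[:conversation_history] = [
      { role: :user, content: "Hello" },
      { role: :assistant, content: "Hi!", agent_name: "Support", tool_calls: [...] },
      { role: :tool, content: "Result", tool_call_id: "call_123" }
    ]

Only user and assistant entries are replayed into the chat on restore; tool results stay in the saved history but are skipped by restore_conversation_history.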
@@ -230,29 +270,59 @@ module Agents
       context_wrapper.context.delete(:pending_handoff)
     end
 
-
+    # Configures a RubyLLM chat instance with agent-specific settings.
+    # Uses RubyLLM's replace option to swap agent context while preserving conversation history during handoffs.
+    #
+    # @param chat [RubyLLM::Chat] The chat instance to configure
+    # @param agent [Agents::Agent] The agent whose configuration to apply
+    # @param context_wrapper [RunContext] Thread-safe context wrapper
+    # @param replace [Boolean] Whether to replace existing configuration (true for handoffs, false for initial setup)
+    # @return [RubyLLM::Chat] The configured chat instance
+    def configure_chat_for_agent(chat, agent, context_wrapper, replace: false)
       # Get system prompt (may be dynamic)
       system_prompt = agent.get_system_prompt(context_wrapper)
 
-      #
-
-      regular_tools = agent.tools
+      # Combine all tools - both handoff and regular tools need wrapping
+      all_tools = build_agent_tools(agent, context_wrapper)
 
-      #
-
+      # Switch model if different (important for handoffs between agents using different models)
+      chat.with_model(agent.model) if replace
 
-      #
-      chat
-
-
-
-        context_wrapper: context_wrapper, # Pass context directly
-        response_schema: agent.response_schema # Pass structured output schema
-      )
+      # Configure chat with instructions, temperature, tools, and schema
+      chat.with_instructions(system_prompt, replace: replace) if system_prompt
+      chat.with_temperature(agent.temperature) if agent.temperature
+      chat.with_tools(*all_tools, replace: replace)
+      chat.with_schema(agent.response_schema) if agent.response_schema
 
-      chat.with_instructions(system_prompt) if system_prompt
-      chat.with_tools(*wrapped_regular_tools) if wrapped_regular_tools.any?
       chat
     end
+
+    def apply_headers(chat, headers)
+      return if headers.empty?
+
+      chat.with_headers(**headers)
+    end
+
+    # Builds thread-safe tool wrappers for an agent's tools and handoff tools.
+    #
+    # @param agent [Agents::Agent] The agent whose tools to wrap
+    # @param context_wrapper [RunContext] Thread-safe context wrapper for tool execution
+    # @return [Array<ToolWrapper>] Array of wrapped tools ready for RubyLLM
+    def build_agent_tools(agent, context_wrapper)
+      all_tools = []
+
+      # Add handoff tools
+      agent.handoff_agents.each do |target_agent|
+        handoff_tool = HandoffTool.new(target_agent)
+        all_tools << ToolWrapper.new(handoff_tool, context_wrapper)
+      end
+
+      # Add regular tools
+      agent.tools.each do |tool|
+        all_tools << ToolWrapper.new(tool, context_wrapper)
+      end
+
+      all_tools
+    end
   end
 end
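Together with the new docs/guides/request-headers.md guide, the headers: parameter flows from the entry point down to RubyLLM::Chat#with_headers, with per-run values overriding agent-level ones. A hedged usage sketch against the internal Runner#run signature shown above; the agent options are illustrative, and the public AgentRunner pass-through is covered by the guide rather than by this excerpt:

    require "agents"
    require "securerandom"

    # Illustrative agents; real configuration (model, tools, handoffs) omitted.
    triage  = Agents::Agent.new(name: "Triage", instructions: "Route the customer to the right team.")
    billing = Agents::Agent.new(name: "Billing", instructions: "Answer billing questions.")

    result = Agents::Runner.new.run(
      triage,
      "I was charged twice this month",
      registry: { triage.name => triage, billing.name => billing },
      headers: { "X-Request-Id" => SecureRandom.uuid } # merged over any agent-level headers
    )
    puts result.output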
data/lib/agents/version.rb
CHANGED
data/lib/agents.rb
CHANGED
@@ -111,12 +111,11 @@ require_relative "agents/run_context"
 require_relative "agents/tool_context"
 require_relative "agents/tool"
 require_relative "agents/handoff"
+require_relative "agents/helpers"
 require_relative "agents/agent"
 
 # Execution components
-require_relative "agents/chat"
 require_relative "agents/tool_wrapper"
-require_relative "agents/message_extractor"
 require_relative "agents/callback_manager"
 require_relative "agents/agent_runner"
 require_relative "agents/runner"
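lib/agents/helpers.rb itself (+9 lines) is not part of this excerpt; given the require added above and the Agents::Helpers::* references in the Runner, it is presumably just a namespace that loads the two helper files. A guess at its contents:

    # frozen_string_literal: true

    # Hypothetical reconstruction of lib/agents/helpers.rb, inferred from its usage.
    module Agents
      module Helpers
      end
    end

    require_relative "helpers/headers"
    require_relative "helpers/message_extractor"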
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: ai-agents
 version: !ruby/object:Gem::Version
-  version: 0.4.3
+  version: 0.6.0
 platform: ruby
 authors:
 - Shivam Mishra
@@ -15,14 +15,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version:
+        version: 1.8.2
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version:
+        version: 1.8.2
 description: Ruby AI Agents SDK enables creating complex AI workflows with multi-agent
   orchestration, tool execution, safety guardrails, and provider-agnostic LLM integration.
 email:
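The only dependency change shown is the ruby_llm constraint, now pinned to ~> 1.8.2 (>= 1.8.2, < 1.9). In a Gemfile that roughly means:

    gem "ai-agents", "0.6.0" # resolves ruby_llm to ~> 1.8.2 transitively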
@@ -60,6 +60,7 @@ files:
 - docs/guides/agent-as-tool-pattern.md
 - docs/guides/multi-agent-systems.md
 - docs/guides/rails-integration.md
+- docs/guides/request-headers.md
 - docs/guides/state-persistence.md
 - docs/guides/structured-output.md
 - docs/index.md
@@ -101,9 +102,10 @@ files:
 - lib/agents/agent_runner.rb
 - lib/agents/agent_tool.rb
 - lib/agents/callback_manager.rb
-- lib/agents/chat.rb
 - lib/agents/handoff.rb
-- lib/agents/
+- lib/agents/helpers.rb
+- lib/agents/helpers/headers.rb
+- lib/agents/helpers/message_extractor.rb
 - lib/agents/result.rb
 - lib/agents/run_context.rb
 - lib/agents/runner.rb
data/lib/agents/chat.rb
DELETED
@@ -1,161 +0,0 @@
-# frozen_string_literal: true
-
-require_relative "tool_context"
-
-module Agents
-  # Extended chat class that inherits from RubyLLM::Chat but adds proper handoff handling.
-  # This solves the infinite handoff loop problem by treating handoffs as turn-ending
-  # operations rather than allowing auto-continuation.
-  class Chat < RubyLLM::Chat
-    # Response object that indicates a handoff occurred
-    class HandoffResponse
-      attr_reader :target_agent, :response, :handoff_message
-
-      def initialize(target_agent:, response:, handoff_message:)
-        @target_agent = target_agent
-        @response = response
-        @handoff_message = handoff_message
-      end
-
-      def tool_call?
-        true
-      end
-
-      def content
-        @handoff_message
-      end
-    end
-
-    def initialize(model: nil, handoff_tools: [], context_wrapper: nil, temperature: nil, response_schema: nil,
-                   **options)
-      super(model: model, **options)
-      @handoff_tools = handoff_tools
-      @context_wrapper = context_wrapper
-
-      # Set temperature if provided (RubyLLM::Chat sets this via accessor)
-      @temperature = temperature if temperature
-
-      # Set response schema if provided
-      with_schema(response_schema) if response_schema
-
-      # Register handoff tools with RubyLLM for schema generation
-      @handoff_tools.each { |tool| with_tool(tool) }
-    end
-
-    # Override the problematic auto-execution method from RubyLLM::Chat
-    def complete(&block)
-      @on[:new_message]&.call
-      response = @provider.complete(
-        messages,
-        tools: @tools,
-        temperature: @temperature,
-        model: @model.id,
-        connection: @connection,
-        params: @params,
-        schema: @schema,
-        &block
-      )
-      @on[:end_message]&.call(response)
-
-      # Handle JSON parsing for structured output (like RubyLLM::Chat)
-      if @schema && response.content.is_a?(String)
-        begin
-          response.content = JSON.parse(response.content)
-        rescue JSON::ParserError
-          # If parsing fails, keep content as string
-        end
-      end
-
-      add_message(response)
-
-      if response.tool_call?
-        handle_tools_with_handoff_detection(response, &block)
-      else
-        response
-      end
-    end
-
-    private
-
-    def handle_tools_with_handoff_detection(response, &block)
-      handoff_calls, regular_calls = classify_tool_calls(response.tool_calls)
-
-      if handoff_calls.any?
-        # Execute first handoff only
-        handoff_result = execute_handoff_tool(handoff_calls.first)
-
-        # Add tool result to conversation
-        add_tool_result(handoff_calls.first.id, handoff_result[:message])
-
-        # Return handoff response to signal agent switch (ends turn)
-        HandoffResponse.new(
-          target_agent: handoff_result[:target_agent],
-          response: response,
-          handoff_message: handoff_result[:message]
-        )
-      else
-        # Use RubyLLM's original tool execution for regular tools
-        execute_regular_tools_and_continue(regular_calls, &block)
-      end
-    end
-
-    def classify_tool_calls(tool_calls)
-      handoff_tool_names = @handoff_tools.map(&:name).map(&:to_s)
-
-      handoff_calls = []
-      regular_calls = []
-
-      tool_calls.each_value do |tool_call|
-        if handoff_tool_names.include?(tool_call.name)
-          handoff_calls << tool_call
-        else
-          regular_calls << tool_call
-        end
-      end
-
-      [handoff_calls, regular_calls]
-    end
-
-    def execute_handoff_tool(tool_call)
-      tool = @handoff_tools.find { |t| t.name.to_s == tool_call.name }
-      raise "Handoff tool not found: #{tool_call.name}" unless tool
-
-      # Execute the handoff tool directly with context
-      tool_context = ToolContext.new(run_context: @context_wrapper)
-      result = tool.execute(tool_context, **{}) # Handoff tools take no additional params
-
-      {
-        target_agent: tool.target_agent,
-        message: result.to_s
-      }
-    end
-
-    def execute_regular_tools_and_continue(tool_calls, &block)
-      # Execute each regular tool call
-      tool_calls.each do |tool_call|
-        @on[:new_message]&.call
-        result = execute_tool(tool_call)
-        message = add_tool_result(tool_call.id, result)
-        @on[:end_message]&.call(message)
-      end
-
-      # Continue conversation after tool execution
-      complete(&block)
-    end
-
-    # Reuse RubyLLM's existing tool execution logic
-    def execute_tool(tool_call)
-      tool = tools[tool_call.name.to_sym]
-      args = tool_call.arguments
-      tool.call(args)
-    end
-
-    def add_tool_result(tool_use_id, result)
-      add_message(
-        role: :tool,
-        content: result.is_a?(Hash) && result[:error] ? result[:error] : result.to_s,
-        tool_call_id: tool_use_id
-      )
-    end
-  end
-end
data/lib/agents/message_extractor.rb
DELETED
@@ -1,97 +0,0 @@
-# frozen_string_literal: true
-
-module Agents
-  # Service object responsible for extracting and formatting conversation messages
-  # from RubyLLM chat objects into a format suitable for persistence and context restoration.
-  #
-  # Handles different message types:
-  # - User messages: Basic content preservation
-  # - Assistant messages: Includes agent attribution and tool calls
-  # - Tool result messages: Links back to original tool calls
-  #
-  # @example Extract messages from a chat
-  #   messages = MessageExtractor.extract_messages(chat, current_agent)
-  #   #=> [
-  #   #     { role: :user, content: "Hello" },
-  #   #     { role: :assistant, content: "Hi!", agent_name: "Support", tool_calls: [...] },
-  #   #     { role: :tool, content: "Result", tool_call_id: "call_123" }
-  #   #   ]
-  class MessageExtractor
-    # Check if content is considered empty (handles both String and Hash content)
-    #
-    # @param content [String, Hash, nil] The content to check
-    # @return [Boolean] true if content is empty, false otherwise
-    def self.content_empty?(content)
-      case content
-      when String
-        content.strip.empty?
-      when Hash
-        content.empty?
-      else
-        content.nil?
-      end
-    end
-
-    # Extract messages from a chat object for conversation history persistence
-    #
-    # @param chat [Object] Chat object that responds to :messages
-    # @param current_agent [Agent] The agent currently handling the conversation
-    # @return [Array<Hash>] Array of message hashes suitable for persistence
-    def self.extract_messages(chat, current_agent)
-      new(chat, current_agent).extract
-    end
-
-    def initialize(chat, current_agent)
-      @chat = chat
-      @current_agent = current_agent
-    end
-
-    def extract
-      return [] unless @chat.respond_to?(:messages)
-
-      @chat.messages.filter_map do |msg|
-        case msg.role
-        when :user, :assistant
-          extract_user_or_assistant_message(msg)
-        when :tool
-          extract_tool_message(msg)
-        end
-      end
-    end
-
-    private
-
-    def extract_user_or_assistant_message(msg)
-      return nil unless msg.content && !self.class.content_empty?(msg.content)
-
-      message = {
-        role: msg.role,
-        content: msg.content
-      }
-
-      if msg.role == :assistant
-        # Add agent attribution for conversation continuity
-        message[:agent_name] = @current_agent.name if @current_agent
-
-        # Add tool calls if present
-        if msg.tool_call? && msg.tool_calls
-          # RubyLLM stores tool_calls as Hash with call_id => ToolCall object
-          # Reference: RubyLLM::StreamAccumulator#tool_calls_from_stream
-          message[:tool_calls] = msg.tool_calls.values.map(&:to_h)
-        end
-      end
-
-      message
-    end
-
-    def extract_tool_message(msg)
-      return nil unless msg.tool_result?
-
-      {
-        role: msg.role,
-        content: msg.content,
-        tool_call_id: msg.tool_call_id
-      }
-    end
-  end
-end