ai-agents 0.4.3 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,6 +6,7 @@ require_relative "tools/create_lead_tool"
 require_relative "tools/create_checkout_tool"
 require_relative "tools/search_docs_tool"
 require_relative "tools/escalate_to_human_tool"
+require "ruby_llm/schema"
 
 module ISPSupport
   # Factory for creating all ISP support agents with proper handoff relationships.
@@ -56,7 +57,8 @@ module ISPSupport
         instructions: sales_instructions_with_state,
         model: "gpt-4.1-mini",
         tools: [ISPSupport::CreateLeadTool.new, ISPSupport::CreateCheckoutTool.new],
-        temperature: 0.8 # Higher temperature for more persuasive, varied sales language
+        temperature: 0.8, # Higher temperature for more persuasive, varied sales language
+        response_schema: sales_response_schema
       )
     end
 
@@ -70,7 +72,8 @@ module ISPSupport
           ISPSupport::SearchDocsTool.new,
           ISPSupport::EscalateToHumanTool.new
         ],
-        temperature: 0.5 # Balanced temperature for helpful but consistent technical support
+        temperature: 0.5, # Balanced temperature for helpful but consistent technical support
+        response_schema: triage_response_schema
       )
     end
 
@@ -90,40 +93,38 @@ module ISPSupport
 
         Keep responses brief and professional. Use handoff tools to transfer to specialists.
 
-        Your response MUST be in the required JSON format with greeting, intent_category, needs_clarification, clarifying_question, and recommended_agent fields.
+        Your response MUST be in the required JSON format with response, clarifying_question, needs_clarification, and intent fields.
       INSTRUCTIONS
     end
 
     def triage_response_schema
-      {
-        type: "object",
-        properties: {
-          greeting: {
-            type: "string",
-            description: "A brief, friendly greeting acknowledging the customer's inquiry"
-          },
-          intent_category: {
-            type: "string",
-            enum: %w[sales support unclear],
-            description: "The detected category of the customer's intent"
-          },
-          needs_clarification: {
-            type: "boolean",
-            description: "Whether the intent is unclear and needs clarification"
-          },
-          clarifying_question: {
-            type: ["string", "null"],
-            description: "A question to ask if the intent is unclear (null if clear)"
-          },
-          recommended_agent: {
-            type: ["string", "null"],
-            enum: ["Sales Agent", "Support Agent", null],
-            description: "The recommended specialist agent to route to (null if unclear)"
-          }
-        },
-        required: %w[greeting intent_category needs_clarification],
-        additionalProperties: false
-      }
+      RubyLLM::Schema.create do
+        string :response, description: "Your response to the customer"
+        string :intent, enum: %w[sales support unclear], description: "The detected intent category"
+        array :sentiment, description: "Customer sentiment indicators" do
+          string enum: %w[positive neutral negative frustrated urgent confused satisfied]
+        end
+      end
+    end
+
+    def support_response_schema
+      RubyLLM::Schema.create do
+        string :response, description: "Your response to the customer"
+        string :intent, enum: %w[support], description: "The intent category (always support)"
+        array :sentiment, description: "Customer sentiment indicators" do
+          string enum: %w[positive neutral negative frustrated urgent confused satisfied]
+        end
+      end
+    end
+
+    def sales_response_schema
+      RubyLLM::Schema.create do
+        string :response, description: "Your response to the customer"
+        string :intent, enum: %w[sales], description: "The intent category (always sales)"
+        array :sentiment, description: "Customer sentiment indicators" do
+          string enum: %w[positive neutral negative frustrated urgent confused satisfied]
+        end
+      end
     end
 
     def sales_instructions
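
The hand-written JSON Schema hash is replaced by the `RubyLLM::Schema` DSL, and each schema is handed to its agent through the `response_schema:` option added in the hunks above. A rough sketch of how the pieces fit together outside the factory (the agent name, instructions, and schema fields below are invented for illustration; `require "agents"` is assumed to be the gem's entry point):

```ruby
require "agents"          # assumed entry point for the ai-agents gem (lib/agents.rb)
require "ruby_llm/schema"

# Hypothetical schema mirroring the factory's DSL usage above.
billing_schema = RubyLLM::Schema.create do
  string :response, description: "Your response to the customer"
  string :intent, enum: %w[billing], description: "The intent category (always billing)"
  array :sentiment, description: "Customer sentiment indicators" do
    string enum: %w[positive neutral negative]
  end
end

# Hypothetical agent wired the same way as the factory's Agents::Agent.new calls.
billing_agent = Agents::Agent.new(
  name: "Billing Agent",
  instructions: "Help customers with invoices and payments.",
  model: "gpt-4.1-mini",
  temperature: 0.5,
  response_schema: billing_schema
)
```
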
@@ -2,6 +2,7 @@
 # frozen_string_literal: true
 
 require "json"
+require "readline"
 require_relative "../../lib/agents"
 require_relative "agents_factory"
 
@@ -29,78 +30,96 @@ class ISPSupportDemo
     @context = {}
     @current_status = ""
 
-    puts "🏢 Welcome to ISP Customer Support!"
-    puts "Type '/help' for commands or 'exit' to quit."
+    puts green("🏢 Welcome to ISP Customer Support!")
+    puts dim_text("Type '/help' for commands or 'exit' to quit.")
     puts
   end
 
   def start
     loop do
-      print "💬 You: "
-      user_input = gets.chomp.strip
+      user_input = Readline.readline(cyan("\u{1F4AC} You: "), true)
+      next unless user_input # Handle Ctrl+D
 
+      user_input = user_input.strip
       command_result = handle_command(user_input)
       break if command_result == :exit
       next if command_result == :handled || user_input.empty?
 
       # Clear any previous status and show agent is working
       clear_status_line
-      print "🤖 Processing..."
+      print yellow("🤖 Processing...")
 
-      # Use the runner - it automatically determines the right agent from context
-      result = @runner.run(user_input, context: @context)
+      begin
+        # Use the runner - it automatically determines the right agent from context
+        result = @runner.run(user_input, context: @context)
 
-      # Update our context with the returned context from Runner
-      @context = result.context if result.respond_to?(:context) && result.context
+        # Update our context with the returned context from Runner
+        @context = result.context if result.respond_to?(:context) && result.context
 
-      # Clear status and show response
-      clear_status_line
+        # Clear status and show response with callback history
+        clear_status_line
+
+        # Display callback messages if any
+        if @callback_messages.any?
+          puts dim_text(@callback_messages.join("\n"))
+          @callback_messages.clear
+        end
 
-      # Handle structured output from triage agent
-      output = result.output || "[No output]"
-      if @context[:current_agent] == "Triage Agent" && output.start_with?("{")
-        begin
-          structured = JSON.parse(output)
-          # Display the greeting from structured response
-          puts "🤖 #{structured["greeting"]}"
-          if structured["intent_category"]
-            puts " [Intent: #{structured["intent_category"]}, Routing to: #{structured["recommended_agent"] || "TBD"}]"
-          end
-        rescue JSON::ParserError
-          # Fall back to regular output if not valid JSON
+        # Handle structured output from agents
+        output = result.output || "[No output]"
+
+        if output.is_a?(Hash) && output.key?("response")
+          # Display the response from structured response
+          puts "🤖 #{output["response"]}"
+          puts dim_text(" [Intent]: #{output["intent"]}") if output["intent"]
+          puts dim_text(" [Sentiment]: #{output["sentiment"].join(", ")}") if output["sentiment"]&.any?
+        else
           puts "🤖 #{output}"
         end
-      else
-        puts "🤖 #{output}"
-      end
 
-      puts
+        puts # Add blank line after agent response
+      rescue StandardError => e
+        clear_status_line
+        puts red("❌ Error: #{e.message}")
+        puts dim_text("Please try again or type '/help' for assistance.")
+        puts # Add blank line after error message
+      end
     end
   end
 
   private
 
   def setup_callbacks
+    @callback_messages = []
+
     @runner.on_agent_thinking do |agent_name, _input|
-      update_status("🧠 #{agent_name} is thinking...")
+      message = "🧠 #{agent_name} is thinking..."
+      update_status(message)
+      @callback_messages << message
     end
 
     @runner.on_tool_start do |tool_name, _args|
-      update_status("🔧 Using #{tool_name}...")
+      message = "🔧 Using #{tool_name}..."
+      update_status(message)
+      @callback_messages << message
     end
 
     @runner.on_tool_complete do |tool_name, _result|
-      update_status("✅ #{tool_name} completed")
+      message = "✅ #{tool_name} completed"
+      update_status(message)
+      @callback_messages << message
     end
 
     @runner.on_agent_handoff do |from_agent, to_agent, _reason|
-      update_status("🔄 Handoff: #{from_agent} → #{to_agent}")
+      message = "🔄 Handoff: #{from_agent} → #{to_agent}"
+      update_status(message)
+      @callback_messages << message
     end
   end
 
   def update_status(message)
     clear_status_line
-    print message
+    print dim_text(message)
     $stdout.flush
   end
 
@@ -207,6 +226,27 @@ class ISPSupportDemo
       else "Unknown agent"
       end
   end
+
+  # ANSI color helper methods
+  def dim_text(text)
+    "\e[90m#{text}\e[0m"
+  end
+
+  def green(text)
+    "\e[32m#{text}\e[0m"
+  end
+
+  def yellow(text)
+    "\e[33m#{text}\e[0m"
+  end
+
+  def red(text)
+    "\e[31m#{text}\e[0m"
+  end
+
+  def cyan(text)
+    "\e[36m#{text}\e[0m"
+  end
 end
 
 # Run the demo
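
With structured output enabled, the demo's start loop receives `result.output` as an already-parsed Hash instead of a JSON string, so the old `JSON.parse` special-casing for the Triage Agent goes away. A sketch of the shape the new display branch handles (the literal values are invented):

```ruby
# Hypothetical structured output handled by the
# `if output.is_a?(Hash) && output.key?("response")` branch above.
output = {
  "response"  => "I can help you upgrade your plan.",
  "intent"    => "sales",
  "sentiment" => %w[positive satisfied]
}

puts "🤖 #{output["response"]}"
puts " [Intent]: #{output["intent"]}" if output["intent"]
puts " [Sentiment]: #{output["sentiment"].join(", ")}" if output["sentiment"]&.any?
```
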
data/lib/agents/agent.rb CHANGED
@@ -4,7 +4,7 @@
 # Agents are immutable, thread-safe objects that can be cloned with modifications.
 # They encapsulate the configuration needed to interact with an LLM including
 # instructions, tools, and potential handoff targets.
-#
+require_relative "helpers/headers"
 # @example Creating a basic agent
 #   agent = Agents::Agent.new(
 #     name: "Assistant",
@@ -50,7 +50,7 @@
 #   )
 module Agents
   class Agent
-    attr_reader :name, :instructions, :model, :tools, :handoff_agents, :temperature, :response_schema
+    attr_reader :name, :instructions, :model, :tools, :handoff_agents, :temperature, :response_schema, :headers
 
     # Initialize a new Agent instance
     #
@@ -61,8 +61,9 @@ module Agents
     # @param handoff_agents [Array<Agents::Agent>] Array of agents this agent can hand off to
     # @param temperature [Float] Controls randomness in responses (0.0 = deterministic, 1.0 = very random, default: 0.7)
     # @param response_schema [Hash, nil] JSON schema for structured output responses
+    # @param headers [Hash, nil] Default HTTP headers applied to LLM requests
     def initialize(name:, instructions: nil, model: "gpt-4.1-mini", tools: [], handoff_agents: [], temperature: 0.7,
-                   response_schema: nil)
+                   response_schema: nil, headers: nil)
       @name = name
       @instructions = instructions
       @model = model
@@ -70,6 +71,7 @@ module Agents
       @handoff_agents = []
       @temperature = temperature
       @response_schema = response_schema
+      @headers = Helpers::Headers.normalize(headers, freeze_result: true)
 
       # Mutex for thread-safe handoff registration
       # While agents are typically configured at startup, we want to ensure
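
Since `initialize` runs the headers through `Helpers::Headers.normalize(..., freeze_result: true)`, string keys come back symbolized and the hash is frozen. A minimal sketch of the new option (the header name is illustrative, not part of the gem):

```ruby
# Illustrative default headers on an agent; the header name is made up.
agent = Agents::Agent.new(
  name: "Assistant",
  instructions: "Be concise.",
  headers: { "X-Request-Source" => "docs-example" }
)

agent.headers #=> { :"X-Request-Source" => "docs-example" } (frozen, keys symbolized)
```
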
@@ -164,7 +166,8 @@ module Agents
         tools: changes.fetch(:tools, @tools.dup),
         handoff_agents: changes.fetch(:handoff_agents, @handoff_agents),
         temperature: changes.fetch(:temperature, @temperature),
-        response_schema: changes.fetch(:response_schema, @response_schema)
+        response_schema: changes.fetch(:response_schema, @response_schema),
+        headers: changes.fetch(:headers, @headers)
       )
     end
 
@@ -58,12 +58,12 @@ module Agents
     # @param input [String] User's message
     # @param context [Hash] Conversation context (will be restored if continuing conversation)
     # @param max_turns [Integer] Maximum turns before stopping (default: 10)
+    # @param headers [Hash, nil] Custom HTTP headers to pass through to the underlying LLM provider
     # @return [RunResult] Execution result with output, messages, and updated context
-    def run(input, context: {}, max_turns: Runner::DEFAULT_MAX_TURNS)
+    def run(input, context: {}, max_turns: Runner::DEFAULT_MAX_TURNS, headers: nil)
       # Determine which agent should handle this conversation
       # Uses conversation history to maintain continuity across handoffs
       current_agent = determine_conversation_agent(context)
-
       # Execute using stateless Runner - each execution is independent and thread-safe
       # Pass callbacks to enable real-time event notifications
       Runner.new.run(
@@ -72,6 +72,7 @@ module Agents
         context: context,
         registry: @registry,
         max_turns: max_turns,
+        headers: headers,
         callbacks: @callbacks
       )
     end
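
Per-call headers can also be passed straight into `AgentRunner#run`. A hedged sketch, assuming `runner` is an already-configured AgentRunner such as the demo's `@runner`, with an invented header name:

```ruby
# `runner` is assumed to be a configured AgentRunner (e.g. the demo's @runner).
result = runner.run(
  "My internet is down",
  context: {},
  headers: { "X-Correlation-Id" => "req-1234" } # illustrative tracing header
)

puts result.output
```
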
@@ -69,11 +69,19 @@ module Agents
       @tool_description
     end
 
-    # Handoff tools now work with the extended Chat class for proper handoff handling
-    # No longer need context signaling - the Chat class detects handoffs directly
-    def perform(_tool_context)
-      # Simply return the transfer message - Chat class will handle the handoff
-      "I'll transfer you to #{@target_agent.name} who can better assist you with this."
+    # Use RubyLLM's halt mechanism to stop continuation after handoff
+    # Store handoff info in context for Runner to detect and process
+    def perform(tool_context)
+      # Store handoff information in context for Runner to detect
+      # TODO: The following is a race condition that needs to be addressed in future versions
+      # If multiple handoff tools execute concurrently, they overwrite each other's pending_handoff data.
+      tool_context.run_context.context[:pending_handoff] = {
+        target_agent: @target_agent,
+        timestamp: Time.now
+      }
+
+      # Return halt to stop LLM continuation
+      halt("I'll transfer you to #{@target_agent.name} who can better assist you with this.")
     end
 
     # NOTE: RubyLLM will handle schema generation internally when needed
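
How the Runner reacts to the halted tool call is not part of this diff, but the stored `:pending_handoff` entry implies a consumer roughly shaped like the sketch below (illustrative only, not the gem's actual Runner code):

```ruby
# Hypothetical consumer of the :pending_handoff entry written by #perform above.
if (handoff = context.delete(:pending_handoff))
  current_agent = handoff[:target_agent] # continue the turn with the requested agent
end
```
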
@@ -0,0 +1,29 @@
+# frozen_string_literal: true
+
+module Agents::Helpers::Headers
+  module_function
+
+  def normalize(headers, freeze_result: false)
+    return freeze_result ? {}.freeze : {} if headers.nil? || (headers.respond_to?(:empty?) && headers.empty?)
+
+    hash = headers.respond_to?(:to_h) ? headers.to_h : headers
+    raise ArgumentError, "headers must be a Hash or respond to #to_h" unless hash.is_a?(Hash)
+
+    result = symbolize_keys(hash)
+    freeze_result ? result.freeze : result
+  end
+
+  def merge(agent_headers, runtime_headers)
+    return runtime_headers if agent_headers.empty?
+    return agent_headers if runtime_headers.empty?
+
+    agent_headers.merge(runtime_headers) { |_key, _agent_value, runtime_value| runtime_value }
+  end
+
+  def symbolize_keys(hash)
+    hash.each_with_object({}) do |(key, value), memo|
+      memo[key.is_a?(Symbol) ? key : key.to_sym] = value
+    end
+  end
+  private_class_method :symbolize_keys
+end
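
The new helper symbolizes string keys and, on key conflicts, lets runtime headers win over agent defaults. For example, with the gem loaded:

```ruby
# Usage of the module defined above.
agent_headers   = Agents::Helpers::Headers.normalize({ "X-Team" => "support" }, freeze_result: true)
runtime_headers = Agents::Helpers::Headers.normalize({ "X-Team" => "sales", "X-Trace" => "abc123" })

Agents::Helpers::Headers.merge(agent_headers, runtime_headers)
#=> { :"X-Team" => "sales", :"X-Trace" => "abc123" }
```
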
@@ -0,0 +1,88 @@
+# frozen_string_literal: true
+
+# Service object responsible for extracting and formatting conversation messages
+# from RubyLLM chat objects into a format suitable for persistence and context restoration.
+#
+# Handles different message types:
+# - User messages: Basic content preservation
+# - Assistant messages: Includes agent attribution and tool calls
+# - Tool result messages: Links back to original tool calls
+#
+# @example Extract messages from a chat
+#   messages = Agents::Helpers::MessageExtractor.extract_messages(chat, current_agent)
+#   #=> [
+#     { role: :user, content: "Hello" },
+#     { role: :assistant, content: "Hi!", agent_name: "Support", tool_calls: [...] },
+#     { role: :tool, content: "Result", tool_call_id: "call_123" }
+#   ]
+module Agents::Helpers::MessageExtractor
+  module_function
+
+  # Check if content is considered empty (handles both String and Hash content)
+  #
+  # @param content [String, Hash, nil] The content to check
+  # @return [Boolean] true if content is empty, false otherwise
+  def content_empty?(content)
+    case content
+    when String
+      content.strip.empty?
+    when Hash
+      content.empty?
+    else
+      content.nil?
+    end
+  end
+
+  # Extract messages from a chat object for conversation history persistence
+  #
+  # @param chat [Object] Chat object that responds to :messages
+  # @param current_agent [Agent] The agent currently handling the conversation
+  # @return [Array<Hash>] Array of message hashes suitable for persistence
+  def extract_messages(chat, current_agent)
+    return [] unless chat.respond_to?(:messages)
+
+    chat.messages.filter_map do |msg|
+      case msg.role
+      when :user, :assistant
+        extract_user_or_assistant_message(msg, current_agent)
+      when :tool
+        extract_tool_message(msg)
+      end
+    end
+  end
+
+  def extract_user_or_assistant_message(msg, current_agent)
+    return nil unless msg.content && !content_empty?(msg.content)
+
+    message = {
+      role: msg.role,
+      content: msg.content
+    }
+
+    if msg.role == :assistant
+      # Add agent attribution for conversation continuity
+      message[:agent_name] = current_agent.name if current_agent
+
+      # Add tool calls if present
+      if msg.tool_call? && msg.tool_calls
+        # RubyLLM stores tool_calls as Hash with call_id => ToolCall object
+        # Reference: RubyLLM::StreamAccumulator#tool_calls_from_stream
+        message[:tool_calls] = msg.tool_calls.values.map(&:to_h)
+      end
+    end
+
+    message
+  end
+  private_class_method :extract_user_or_assistant_message
+
+  def extract_tool_message(msg)
+    return nil unless msg.tool_result?
+
+    {
+      role: msg.role,
+      content: msg.content,
+      tool_call_id: msg.tool_call_id
+    }
+  end
+  private_class_method :extract_tool_message
+end
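
The extractor drops user and assistant messages whose content is empty, and `content_empty?` treats blank strings, empty hashes, and nil as empty. For example:

```ruby
Agents::Helpers::MessageExtractor.content_empty?("   ")   #=> true
Agents::Helpers::MessageExtractor.content_empty?({})      #=> true
Agents::Helpers::MessageExtractor.content_empty?(nil)     #=> true
Agents::Helpers::MessageExtractor.content_empty?("Hello") #=> false
```
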
@@ -0,0 +1,9 @@
+# frozen_string_literal: true
+
+module Agents
+  module Helpers
+  end
+end
+
+require_relative "helpers/headers"
+require_relative "helpers/message_extractor"