ai-agents 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,135 @@
+ #!/usr/bin/env ruby
+ # frozen_string_literal: true
+
+ require_relative "../../lib/agents"
+ require_relative "agents_factory"
+
+ # Simple ISP Customer Support Demo
+ class ISPSupportDemo
+   def initialize
+     # Configure the Agents SDK with API key
+     Agents.configure do |config|
+       config.openai_api_key = ENV["OPENAI_API_KEY"]
+     end
+
+     # Create agents
+     @agents = ISPSupport::AgentsFactory.create_agents
+     @triage_agent = @agents[:triage]
+     @context = {}
+
+     puts "🏢 Welcome to ISP Customer Support!"
+     puts "Type '/help' for commands or 'exit' to quit."
+     puts
+   end
+
+   def start
+     loop do
+       print "💬 You: "
+       user_input = (gets || "exit").chomp.strip # treat EOF (Ctrl-D) as "exit"
+
+       command_result = handle_command(user_input)
+       break if command_result == :exit
+       next if command_result == :handled || user_input.empty?
+
+       # Determine which agent to use - either from context or the triage agent
+       current_agent = @context[:current_agent] || @triage_agent
+
+       result = Agents::Runner.run(current_agent, user_input, context: @context)
+
+       # Update our context with the context returned by the Runner
+       @context = result.context if result.respond_to?(:context) && result.context
+
+       puts "🤖 #{result.output || "[No output]"}"
+
+       puts
+     end
+   end
+
+   private
+
+   def handle_command(input)
+     case input.downcase
+     when "exit", "quit"
+       puts "👋 Goodbye!"
+       :exit
+     when "/help"
+       show_help
+       :handled
+     when "/reset"
+       @context.clear
+       puts "🔄 Context reset. Starting fresh conversation."
+       :handled
+     when "/agents"
+       show_agents
+       :handled
+     when "/tools"
+       show_tools
+       :handled
+     when "/context"
+       show_context
+       :handled
+     else
+       :not_command # Not a command, continue with normal processing
+     end
+   end
+
+   def show_help
+     puts "📋 Available Commands:"
+     puts "  /help     - Show this help message"
+     puts "  /reset    - Clear conversation context and start fresh"
+     puts "  /agents   - List all available agents"
+     puts "  /tools    - Show tools available to agents"
+     puts "  /context  - Show current conversation context"
+     puts "  exit/quit - End the session"
+     puts
+     puts "💡 Example customer requests:"
+     puts "  - 'What's my current plan?' (try account ID: CUST001)"
+     puts "  - 'I want to upgrade my internet'"
+     puts "  - 'My internet is slow'"
+   end
+
+   def show_agents
+     puts "🤖 Available Agents:"
+     @agents.each do |key, agent|
+       puts "  #{agent.name} - #{get_agent_description(key)}"
+     end
+   end
+
+   def show_tools
+     puts "🔧 Agent Tools:"
+     @agents.each_value do |agent|
+       puts "  #{agent.name}:"
+       if agent.all_tools.empty?
+         puts "    (no tools)"
+       else
+         agent.all_tools.each do |tool|
+           puts "    - #{tool.name}: #{tool.description}"
+         end
+       end
+     end
+   end
+
+   def show_context
+     puts "📊 Current Context:"
+     if @context.empty?
+       puts "  (empty)"
+     else
+       @context.each do |key, value|
+         puts "  #{key}: #{value}"
+       end
+     end
+   end
+
+   def get_agent_description(key)
+     case key
+     when :triage then "Routes customers to appropriate specialists"
+     when :customer_info then "Handles account information and billing"
+     when :sales then "Manages new sales and upgrades"
+     when :support then "Provides technical support and troubleshooting"
+     else "Unknown agent"
+     end
+   end
+ end
+
+ # Run the demo
+ ISPSupportDemo.new.start if __FILE__ == $PROGRAM_NAME
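
Note: the interactive loop above reduces to a configure-then-run cycle. Below is a minimal non-interactive sketch of the same wiring; it is not part of the published package and assumes OPENAI_API_KEY is set and that the script sits next to the demo so the require_relative paths resolve.

  require_relative "../../lib/agents"
  require_relative "agents_factory"

  Agents.configure { |config| config.openai_api_key = ENV["OPENAI_API_KEY"] }

  agents  = ISPSupport::AgentsFactory.create_agents
  context = {}

  # One turn: run the triage agent, then keep the returned context for the next turn.
  result  = Agents::Runner.run(agents[:triage], "My internet is slow", context: context)
  context = result.context if result.respond_to?(:context) && result.context
  puts result.output
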
@@ -0,0 +1,16 @@
+ # frozen_string_literal: true
+
+ require "securerandom"
+
+ module ISPSupport
+   # Tool for creating checkout links for new service subscriptions.
+   class CreateCheckoutTool < Agents::Tool
+     description "Create a secure checkout link for a service plan"
+     param :plan_name, String, "Name of the plan to purchase"
+
+     def perform(_tool_context, plan_name:)
+       session_id = SecureRandom.hex(8)
+       "https://checkout.isp.com/#{session_id}"
+     end
+   end
+ end
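
Note: perform accepts plan_name to satisfy the declared schema, but this stub returns a generic link that ignores it. A sketch (illustrative names, not from the package) of exercising the tool directly and wiring it to an agent, assuming Agents::Tool subclasses can be instantiated without arguments, as the HandoffTool later in this diff suggests:

  checkout = ISPSupport::CreateCheckoutTool.new
  checkout.perform(nil, plan_name: "Fiber 1Gbps") # => "https://checkout.isp.com/<16 hex chars>"

  sales_agent = Agents::Agent.new(
    name: "Sales",
    instructions: "Help customers purchase or upgrade plans",
    tools: [checkout]
  )
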
@@ -0,0 +1,15 @@
+ # frozen_string_literal: true
+
+ module ISPSupport
+   # Tool for creating sales leads in the CRM system.
+   class CreateLeadTool < Agents::Tool
+     description "Create a new sales lead with customer information"
+     param :name, String, "Customer's full name"
+     param :email, String, "Customer's email address"
+     param :desired_plan, String, "Plan the customer is interested in"
+
+     def perform(_tool_context, name:, email:, desired_plan:)
+       "Lead created for #{name} (#{email}) interested in the #{desired_plan} plan. The sales team will contact you within 24 hours."
+     end
+   end
+ end
@@ -0,0 +1,28 @@
+ # frozen_string_literal: true
+
+ require "json"
+
+ module ISPSupport
+   # Tool for looking up customer information from the CRM system.
+   class CrmLookupTool < Agents::Tool
+     description "Look up customer account information by account ID"
+     param :account_id, String, "Customer account ID (e.g., CUST001)"
+
+     def perform(_tool_context, account_id:)
+       data_file = File.join(__dir__, "../data/customers.json")
+       return "Customer database unavailable" unless File.exist?(data_file)
+
+       begin
+         customers = JSON.parse(File.read(data_file))
+         customer = customers[account_id.upcase]
+
+         return "Customer not found" unless customer
+
+         # Return the entire customer data as JSON for the agent to process
+         customer.to_json
+       rescue StandardError
+         "Error looking up customer"
+       end
+     end
+   end
+ end
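
Note: the lookup keys data/customers.json by upper-cased account ID (the demo suggests CUST001); the actual record fields are not shown in this diff. A hypothetical sketch of the keyed-by-account-ID shape, with invented placeholder fields:

  require "json"

  # Placeholder fields - illustrative only, not taken from the gem's data file.
  sample = {
    "CUST001" => { "name" => "Jane Doe", "plan" => "Fiber 100", "balance" => "$0.00" }
  }
  puts JSON.pretty_generate(sample)
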
@@ -0,0 +1,12 @@
+ # frozen_string_literal: true
+
+ module ISPSupport
+   # Tool for escalating complex issues to human support agents.
+   class EscalateToHumanTool < Agents::Tool
+     description "Escalate the issue to a human support agent"
+
+     def perform(_tool_context)
+       "I'm connecting you to a human support agent. Please hold while I transfer your case. A live agent will be with you shortly to provide personalized assistance."
+     end
+   end
+ end
@@ -0,0 +1,22 @@
+ # frozen_string_literal: true
+
+ module ISPSupport
+   # Tool for searching the knowledge base documentation.
+   class SearchDocsTool < Agents::Tool
+     description "Search knowledge base for troubleshooting steps and solutions"
+     param :query, String, "Search terms or description of the issue"
+
+     def perform(_tool_context, query:)
+       case query.downcase
+       when /slow|speed/
+         "Try restarting your modem and router. Unplug for 30 seconds, then plug back in. Test speed at speedtest.net."
+       when /no internet|down|offline/
+         "Check modem lights are solid green. Unplug modem for 30 seconds, then plug back in. Wait 3 minutes for restart."
+       when /wifi|wireless/
+         "Check WiFi is enabled on device. Verify correct password. Move closer to router. Restart router if needed."
+       else
+         "General troubleshooting: Restart modem and router, check cable connections, test with different device."
+       end
+     end
+   end
+ end
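
Note: because the keyword matching is self-contained, the tool can be exercised directly; the unused tool context can be nil. A quick sketch, assuming the gem is loaded and the tool can be instantiated without arguments:

  docs = ISPSupport::SearchDocsTool.new
  docs.perform(nil, query: "My internet is slow")        # => "Try restarting your modem and router. ..."
  docs.perform(nil, query: "Router keeps dropping WiFi") # => "Check WiFi is enabled on device. ..."
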
@@ -0,0 +1,170 @@
+ # frozen_string_literal: true
+
+ # The core agent definition that represents an AI assistant with specific capabilities.
+ # Agents are immutable, thread-safe objects that can be cloned with modifications.
+ # They encapsulate the configuration needed to interact with an LLM including
+ # instructions, tools, and potential handoff targets.
+ #
+ # @example Creating a basic agent
+ #   agent = Agents::Agent.new(
+ #     name: "Assistant",
+ #     instructions: "You are a helpful assistant",
+ #     model: "gpt-4",
+ #     tools: [calculator_tool, weather_tool]
+ #   )
+ #
+ # @example Creating an agent with dynamic instructions
+ #   agent = Agents::Agent.new(
+ #     name: "Support Agent",
+ #     instructions: ->(context) {
+ #       "You are supporting user #{context.context[:user_name]}"
+ #     }
+ #   )
+ #
+ # @example Cloning an agent with modifications
+ #   specialized_agent = base_agent.clone(
+ #     instructions: "You are a specialized assistant",
+ #     tools: base_agent.tools + [new_tool]
+ #   )
+ module Agents
+   class Agent
+     attr_reader :name, :instructions, :model, :tools, :handoff_agents
+
+     # Initialize a new Agent instance
+     #
+     # @param name [String] The name of the agent
+     # @param instructions [String, Proc, nil] Static string or dynamic Proc that returns instructions
+     # @param model [String] The LLM model to use (default: "gpt-4.1-mini")
+     # @param tools [Array<Agents::Tool>] Array of tool instances the agent can use
+     # @param handoff_agents [Array<Agents::Agent>] Array of agents this agent can hand off to
+     def initialize(name:, instructions: nil, model: "gpt-4.1-mini", tools: [], handoff_agents: [])
+       @name = name
+       @instructions = instructions
+       @model = model
+       @tools = tools.dup
+       @handoff_agents = []
+
+       # Mutex for thread-safe handoff registration.
+       # While agents are typically configured at startup, we want to ensure
+       # that concurrent handoff registrations don't result in lost data.
+       # For example, in a web server with multiple threads initializing
+       # different parts of the system, we might have:
+       #   Thread 1: triage.register_handoffs(billing)
+       #   Thread 2: triage.register_handoffs(support)
+       # Without synchronization, one registration could overwrite the other.
+       @mutex = Mutex.new
+
+       # Register initial handoff agents if provided
+       register_handoffs(*handoff_agents) unless handoff_agents.empty?
+     end
+
+     # Get all tools available to this agent, including any auto-generated handoff tools
+     #
+     # @return [Array<Agents::Tool>] All tools available to the agent
+     def all_tools
+       @mutex.synchronize do
+         # Compute handoff tools dynamically
+         handoff_tools = @handoff_agents.map { |agent| HandoffTool.new(agent) }
+         @tools + handoff_tools
+       end
+     end
+
+     # Register agents that this agent can hand off to.
+     # This method can be called after agent creation to set up handoff relationships.
+     # Thread-safe: multiple threads can safely call this method concurrently.
+     #
+     # @param agents [Array<Agents::Agent>] Agents to register as handoff targets
+     # @return [self] Returns self for method chaining
+     # @example Setting up hub-and-spoke pattern
+     #   # Create agents
+     #   triage = Agent.new(name: "Triage", instructions: "Route to specialists")
+     #   billing = Agent.new(name: "Billing", instructions: "Handle payments")
+     #   support = Agent.new(name: "Support", instructions: "Fix technical issues")
+     #
+     #   # Wire up handoffs after creation - much cleaner than complex factories!
+     #   triage.register_handoffs(billing, support)
+     #   billing.register_handoffs(triage) # Specialists only hand off back to triage
+     #   support.register_handoffs(triage)
+     def register_handoffs(*agents)
+       @mutex.synchronize do
+         @handoff_agents.concat(agents)
+         @handoff_agents.uniq! # Prevent duplicates
+       end
+       self
+     end
+
+     # Creates a new agent instance with modified attributes while preserving immutability.
+     # The clone method is used when you need to create variations of agents without mutating the original.
+     # This is useful for runtime agent modifications; in a multi-tenant environment, for example:
+     #
+     # @example Multi-tenant agent customization
+     #   def agent_for_tenant(tenant)
+     #     @base_agent.clone(
+     #       instructions: "You work for #{tenant.company_name}",
+     #       tools: @base_agent.tools + tenant.custom_tools
+     #     )
+     #   end
+     #
+     # @example Creating specialized variants
+     #   finance_writer = @writer_agent.clone(
+     #     tools: @writer_agent.tools + [financial_research_tool]
+     #   )
+     #
+     #   marketing_writer = @writer_agent.clone(
+     #     tools: @writer_agent.tools + [marketing_research_tool]
+     #   )
+     #
+     # The key insight is that clone preserves immutability - you never accidentally modify a shared agent
+     # instance that other requests might be using. This is critical for thread safety in concurrent
+     # environments.
+     #
+     # It also lets us keep the syntactic sugar that defining agents as classes provides.
+     #
+     # @param changes [Hash] Keyword arguments for attributes to change
+     # @option changes [String] :name New agent name
+     # @option changes [String, Proc] :instructions New instructions
+     # @option changes [String] :model New model identifier
+     # @option changes [Array<Agents::Tool>] :tools New tools array (replaces all tools)
+     # @option changes [Array<Agents::Agent>] :handoff_agents New handoff agents
+     # @return [Agents::Agent] A new agent instance with the specified changes
+     def clone(**changes)
+       self.class.new(
+         name: changes.fetch(:name, @name),
+         instructions: changes.fetch(:instructions, @instructions),
+         model: changes.fetch(:model, @model),
+         tools: changes.fetch(:tools, @tools.dup),
+         handoff_agents: changes.fetch(:handoff_agents, @handoff_agents)
+       )
+     end
+
+     # Get the system prompt for the agent, potentially customized based on runtime context.
+     # Instructions may be a static String or a Proc;
+     # a Proc lets us inject runtime context into the prompt.
+     #
+     # @example Static instructions (most common)
+     #   agent = Agent.new(
+     #     name: "Support",
+     #     instructions: "You are a helpful support agent"
+     #   )
+     #
+     # @example Dynamic instructions based on context
+     #   agent = Agent.new(
+     #     name: "Support",
+     #     instructions: ->(context) {
+     #       user = context.context[:user]
+     #       "You are helping #{user.name}. They are a #{user.tier} customer with account #{user.id}"
+     #     }
+     #   )
+     #
+     # @param context [Agents::RunContext] The current execution context containing runtime data
+     # @return [String, nil] The system prompt string or nil if no instructions are set
+     def get_system_prompt(context)
+       case instructions
+       when String
+         instructions
+       when Proc
+         instructions.call(context)
+       end
+     end
+   end
+ end
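
Note: a short sketch of the hub-and-spoke wiring described in the register_handoffs docs, showing how registered handoffs surface through all_tools; the agent names are illustrative and not from the package.

  triage  = Agents::Agent.new(name: "Triage", instructions: "Route customers to specialists")
  billing = Agents::Agent.new(name: "Billing", instructions: "Handle payments")
  support = Agents::Agent.new(name: "Support", instructions: "Fix technical issues")

  triage.register_handoffs(billing, support)
  billing.register_handoffs(triage)

  # all_tools recomputes handoff tools on each call, so later registrations are picked up.
  triage.all_tools.map(&:name) # => ["handoff_to_billing", "handoff_to_support"]
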
@@ -0,0 +1,116 @@
+ # frozen_string_literal: true
+
+ module Agents
+   # A special tool that enables agents to transfer conversations to other specialized agents.
+   # Handoffs are implemented as tools (following OpenAI's pattern) because this allows
+   # the LLM to naturally decide when to transfer based on the conversation context.
+   #
+   # ## How Handoffs Work
+   # 1. Agent A is configured with handoff_agents: [Agent B, Agent C]
+   # 2. This automatically creates HandoffTool instances for B and C
+   # 3. The LLM can call these tools like any other tool
+   # 4. The tool signals the handoff through context
+   # 5. The Runner detects this and switches to the new agent
+   #
+   # ## First-Call-Wins Implementation
+   # This implementation uses "first-call-wins" semantics to prevent infinite handoff loops.
+   #
+   # ### The Problem We Solved
+   # During development, we discovered that LLMs could call the same handoff tool multiple times
+   # in a single response, leading to infinite loops:
+   #
+   # 1. User: "My internet isn't working but my account shows active"
+   # 2. Triage Agent hands off to Support Agent
+   # 3. Support Agent sees account info is needed, hands back to Triage Agent
+   # 4. Triage Agent sees technical issue, hands off to Support Agent again
+   # 5. This creates an infinite ping-pong loop
+   #
+   # ### Root Cause Analysis
+   # Unlike OpenAI's SDK which processes tool calls before execution, RubyLLM automatically
+   # executes all tool calls in a response. This meant:
+   # - LLM calls handoff tool 10+ times in one response
+   # - Each call sets context[:pending_handoff], overwriting previous values
+   # - Runner processes handoffs after tool execution, seeing only the last one
+   # - Multiple handoff signals created conflicting state
+   #
+   # TODO: Overall, this problem can be tackled better if we replace the RubyLLM chat
+   # program with our own implementation.
+   #
+   # ### The Solution
+   # We implemented first-call-wins semantics inspired by OpenAI's approach:
+   # - First handoff call in a response sets the pending handoff
+   # - Subsequent calls are ignored with a "transfer in progress" message
+   # - This prevents loops and mirrors OpenAI SDK behavior
+   #
+   # ## Why Tools Instead of Instructions
+   # Using tools for handoffs has several advantages:
+   # - LLMs reliably use tools when appropriate
+   # - Clear schema tells the LLM when each handoff is suitable
+   # - No parsing of free text needed
+   # - Works consistently across different LLM providers
+   #
+   # @example Basic handoff setup
+   #   billing_agent = Agent.new(name: "Billing", instructions: "Handle payments")
+   #   support_agent = Agent.new(name: "Support", instructions: "Technical help")
+   #
+   #   triage = Agent.new(
+   #     name: "Triage",
+   #     instructions: "Route users to the right team",
+   #     handoff_agents: [billing_agent, support_agent]
+   #   )
+   #   # Creates tools: handoff_to_billing, handoff_to_support
+   #
+   # @example How the LLM sees it
+   #   # User: "I can't pay my bill"
+   #   # LLM thinks: "This is a payment issue, I should transfer to billing"
+   #   # LLM calls: handoff_to_billing()
+   #   # Runner switches to billing_agent for the next turn
+   #
+   # @example First-call-wins in action
+   #   # Single LLM response with multiple handoff calls:
+   #   # Call 1: handoff_to_support() -> Sets pending_handoff, returns "Transferring to Support"
+   #   # Call 2: handoff_to_support() -> Ignored, returns "Transfer already in progress"
+   #   # Call 3: handoff_to_billing() -> Ignored, returns "Transfer already in progress"
+   #   # Result: Only transfers to Support Agent (first call wins)
+   class HandoffTool < Tool
+     attr_reader :target_agent
+
+     def initialize(target_agent)
+       @target_agent = target_agent
+
+       # Set up the tool with a standardized name and description
+       @tool_name = "handoff_to_#{target_agent.name.downcase.gsub(/\s+/, "_")}"
+       @tool_description = "Transfer conversation to #{target_agent.name}"
+
+       super()
+     end
+
+     # Override the auto-generated name to use our specific name
+     def name
+       @tool_name
+     end
+
+     # Override the description
+     def description
+       @tool_description
+     end
+
+     # Handoff tools implement first-call-wins semantics to prevent infinite loops.
+     # Multiple handoff calls in a single response are ignored (like OpenAI SDK).
+     def perform(tool_context)
+       # First-call-wins: only set handoff if not already set
+       if tool_context.context[:pending_handoff]
+         return "Transfer request noted (already processing a handoff)."
+       end
+
+       # Set the handoff target
+       tool_context.context[:pending_handoff] = @target_agent
+
+       # Return a message that will be shown to the user
+       "I'll transfer you to #{@target_agent.name} who can better assist you with this."
+     end
+
+     # NOTE: RubyLLM will handle schema generation internally when needed.
+     # Handoff tools have no parameters, which RubyLLM will detect automatically.
+   end
+ end
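
Note: to make first-call-wins concrete, here is a standalone sketch, not from the package. It assumes Agents::Tool#initialize takes no required arguments (as the super() call above implies) and uses a minimal Struct in place of the runner's real tool context, since only #context is needed.

  ToolContextStub = Struct.new(:context) # hypothetical stand-in for the runner's tool context

  support = Agents::Agent.new(name: "Support")
  billing = Agents::Agent.new(name: "Billing")

  ctx = ToolContextStub.new({})
  Agents::HandoffTool.new(support).perform(ctx)
  # => "I'll transfer you to Support who can better assist you with this."
  Agents::HandoffTool.new(billing).perform(ctx)
  # => "Transfer request noted (already processing a handoff)."
  ctx.context[:pending_handoff].name # => "Support" (the first call won)
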
@@ -0,0 +1,13 @@
+ # frozen_string_literal: true
+
+ module Agents
+   RunResult = Struct.new(:output, :messages, :usage, :error, :context, keyword_init: true) do
+     def success?
+       error.nil? && !output.nil?
+     end
+
+     def failed?
+       !success?
+     end
+   end
+ end
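
Note: a run counts as successful only when there is no error and some output. A quick illustration, not from the package:

  ok  = Agents::RunResult.new(output: "Your plan is Fiber 100", context: {})
  bad = Agents::RunResult.new(error: StandardError.new("LLM call failed"))

  ok.success?                    # => true
  bad.failed?                    # => true
  Agents::RunResult.new.success? # => false (no output means not successful)
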
@@ -0,0 +1,106 @@
+ # frozen_string_literal: true
+
+ # RunContext encapsulates the execution context and usage metrics for a single agent run.
+ # It provides isolation between concurrent executions by giving each run its own context
+ # copy and tracking token usage throughout the execution. This is a key component in
+ # ensuring thread safety.
+ #
+ # @example Creating a RunContext for an agent execution
+ #   context_data = { user_id: 123, session: "abc" }
+ #   run_context = Agents::RunContext.new(context_data)
+ #
+ #   # Access context during execution
+ #   user_id = run_context.context[:user_id]
+ #
+ #   # Track usage after LLM calls
+ #   run_context.usage.add(llm_response.usage)
+ #
+ # @example Tracking token usage across multiple LLM calls
+ #   run_context = Agents::RunContext.new({})
+ #
+ #   # First LLM call
+ #   response1 = llm.complete(prompt1)
+ #   run_context.usage.add(response1.usage)
+ #
+ #   # Second LLM call
+ #   response2 = llm.complete(prompt2)
+ #   run_context.usage.add(response2.usage)
+ #
+ #   # Total usage is automatically accumulated
+ #   puts "Total tokens: #{run_context.usage.total_tokens}"
+ #
+ # @example Thread safety through context isolation
+ #   # Shared configuration (never modified)
+ #   base_context = { api_key: "secret", model: "gpt-4" }
+ #
+ #   # Concurrent agent runs using Async
+ #   Async do
+ #     5.times.map do |i|
+ #       Async do
+ #         # Each run gets its own context COPY
+ #         run_context = Agents::RunContext.new(base_context.dup)
+ #
+ #         # Safe to modify - changes are isolated to this run
+ #         run_context.context[:user_id] = i
+ #         run_context.context[:session] = "session_#{i}"
+ #
+ #         # Other concurrent runs cannot see these changes
+ #         puts "Run #{i}: user_id = #{run_context.context[:user_id]}"
+ #       end
+ #     end.map(&:wait)
+ #   end
+ #
+ #   # Key points:
+ #   # - base_context remains unmodified
+ #   # - Each run has isolated context via .dup
+ #   # - No race conditions or data leakage between runs
+ module Agents
+   class RunContext
+     attr_reader :context, :usage
+
+     # Initialize a new RunContext with execution context and usage tracking
+     #
+     # @param context [Hash] The execution context data (pass an isolated copy, e.g. via #dup; it is stored as-is)
+     def initialize(context)
+       @context = context
+       @usage = Usage.new
+     end
+
+     # Usage tracks token consumption across all LLM calls within a single run.
+     # This is rudimentary usage reporting: it can feed billing,
+     # but it is not a replacement for tracing.
+     #
+     # @example Accumulating usage from multiple LLM calls
+     #   usage = Agents::RunContext::Usage.new
+     #
+     #   # Add usage from first call
+     #   usage.add(OpenStruct.new(input_tokens: 100, output_tokens: 50, total_tokens: 150))
+     #
+     #   # Add usage from second call
+     #   usage.add(OpenStruct.new(input_tokens: 200, output_tokens: 100, total_tokens: 300))
+     #
+     #   puts usage.total_tokens # => 450
+     class Usage
+       attr_accessor :input_tokens, :output_tokens, :total_tokens
+
+       # Initialize a new Usage tracker with all counters at zero
+       def initialize
+         @input_tokens = 0
+         @output_tokens = 0
+         @total_tokens = 0
+       end
+
+       # Add usage metrics from an LLM response to the running totals.
+       # Safely handles nil values in the usage object.
+       #
+       # @param usage [Object] An object responding to input_tokens, output_tokens, and total_tokens
+       # @example Adding usage from an LLM response
+       #   usage.add(llm_response.usage)
+       def add(usage)
+         @input_tokens += usage.input_tokens || 0
+         @output_tokens += usage.output_tokens || 0
+         @total_tokens += usage.total_tokens || 0
+       end
+     end
+   end
+ end
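
Note: a small end-to-end sketch (not from the package) tying RunContext and Usage together; the Struct stands in for an LLM response's usage object.

  UsageStub = Struct.new(:input_tokens, :output_tokens, :total_tokens)

  base = { account_id: "CUST001" }        # shared configuration, never mutated
  run  = Agents::RunContext.new(base.dup) # each run works on its own copy

  run.context[:current_agent] = "Support" # isolated to this run
  run.usage.add(UsageStub.new(120, 35, 155))
  run.usage.add(UsageStub.new(nil, 20, 20)) # nil counts are treated as 0

  run.usage.total_tokens # => 175
  base                   # => { account_id: "CUST001" } (unchanged)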