ai-agents 0.7.0 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: c143345c7d0dd3a91ff483e0db22a7d23f6c85393275727eddcaf288a1341901
- data.tar.gz: 0f1abfe692571706be41b85a1da924a717f980b8721b21ece7dd62ad4e995fbf
+ metadata.gz: 6391e30443ff9e226e6b3bf3f629f3cd996cbb7fe4479c7a7dc437c82544ae2f
+ data.tar.gz: 22fc6ee4f3130006c1dd1f6c8fb704a176457d35602a434088d25cea5dd3c949
  SHA512:
- metadata.gz: 74c96799a138a5e725b3e889eba22c45e9581e7606a0cd235cecd50b8bdde1c6f10281ec1516fde77f48373c39022d2f72172be8c29489260df79fb575d92de8
- data.tar.gz: 257fd42a178107182a53eb737848ae1a023c28712ac450c4603b12cb60e2ad39dbe7b957a324bd6c24a3a175af8b7c373ec30e25e36425e81642a8f160da7ceb
+ metadata.gz: ae3866cfbec885088c5b41b0e91bbc8532fc3115ed981c3428f56c09b40c1a75bc9bb1277ed1991f19cee4c43118c3f9a5d9ea8d6ecc07d29ad03117a77666e6
+ data.tar.gz: b7de2e98dc3ce52b4c80b7b1bcfa4fb1f07f62176d861ab61b9a1092d2de4273657d54edfa18d6afd83acccee21c89e465f18539b8966c5f9645a673b4e5a3c4
data/CHANGELOG.md CHANGED
@@ -5,6 +5,19 @@ All notable changes to this project will be documented in this file.
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

+ ## [Unreleased]
+
+ ## [0.8.0] - 2026-01-07
+
+ ### Added
+ - **Provider Smoke Tests**: Comprehensive smoke tests for validating against the latest RubyLLM version
+
+ ### Changed
+ - **RubyLLM Update**: Updated to latest RubyLLM version with improved API integration and tool call handling
+
+ ### Fixed
+ - **Tool Message Restoration**: Fixed conversation history restoration to properly handle tool calls and results
+
  ## [0.7.0] - 2025-10-16

  ### Added
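For consumers, the practical upshot of this release is the RubyLLM dependency bump plus the history-restoration fix. A minimal upgrade sketch (the Gemfile below is a hypothetical consuming app, not part of this gem; the constraint change is visible in the metadata diff at the bottom of this page, where the runtime requirement moves from ~> 1.8.2 to ~> 1.9.1):

```ruby
# Hypothetical consumer Gemfile: bumping ai-agents to 0.8.0 also pulls in the
# newer RubyLLM release via the gem's tightened runtime dependency (~> 1.9.1).
source "https://rubygems.org"

gem "ai-agents", "~> 0.8.0"
```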
@@ -54,28 +54,35 @@ module Agents
  end

  def extract_user_or_assistant_message(msg, current_agent)
- return nil unless msg.content && !content_empty?(msg.content)
+ content_present = message_content?(msg)
+ tool_calls_present = assistant_tool_calls?(msg)
+ return nil unless content_present || tool_calls_present

  message = {
  role: msg.role,
- content: msg.content
+ content: content_present ? msg.content : ""
  }

- if msg.role == :assistant
- # Add agent attribution for conversation continuity
- message[:agent_name] = current_agent.name if current_agent
+ return message unless msg.role == :assistant

- # Add tool calls if present
- if msg.tool_call? && msg.tool_calls
- # RubyLLM stores tool_calls as Hash with call_id => ToolCall object
- # Reference: RubyLLM::StreamAccumulator#tool_calls_from_stream
- message[:tool_calls] = msg.tool_calls.values.map(&:to_h)
- end
+ message[:agent_name] = current_agent.name if current_agent
+
+ if tool_calls_present
+ # RubyLLM stores tool_calls as Hash with call_id => ToolCall object
+ # Reference: RubyLLM::StreamAccumulator#tool_calls_from_stream
+ message[:tool_calls] = msg.tool_calls.values.map(&:to_h)
  end

  message
  end
- private_class_method :extract_user_or_assistant_message
+
+ def message_content?(msg)
+ msg.content && !content_empty?(msg.content)
+ end
+
+ def assistant_tool_calls?(msg)
+ msg.role == :assistant && msg.tool_call? && msg.tool_calls && !msg.tool_calls.empty?
+ end

  def extract_tool_message(msg)
  return nil unless msg.tool_result?
@@ -86,7 +93,9 @@ module Agents
  tool_call_id: msg.tool_call_id
  }
  end
- private_class_method :extract_tool_message
+
+ private_class_method :extract_user_or_assistant_message, :message_content?, :assistant_tool_calls?,
+ :extract_tool_message
  end
  end
  end
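To make the refactor above concrete, here is the rough shape of the hash the extractor now returns for an assistant turn that issued a tool call but produced no text. The key names come from the code above; the values and agent name are made up, and the id/name/arguments keys assume that RubyLLM's ToolCall#to_h serializes those fields (which is what runner.rb reads back further down in this diff):

```ruby
# Hypothetical extractor output for a tool-calling assistant message:
# content falls back to "" and tool calls are serialized via ToolCall#to_h.
{
  role: :assistant,
  content: "",
  agent_name: "Support Agent",
  tool_calls: [
    { id: "call_abc123", name: "lookup_order", arguments: { "order_id" => "42" } }
  ]
}
```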
@@ -94,15 +94,21 @@ module Agents
  end

  # Add usage metrics from an LLM response to the running totals.
- # Safely handles nil values in the usage object.
+ # Only tracks usage for responses that have token data (e.g., RubyLLM::Message).
+ # Safely skips responses without token methods (e.g., RubyLLM::Tool::Halt).
  #
- # @param usage [Object] An object responding to input_tokens, output_tokens, and total_tokens
+ # @param response [RubyLLM::Message] A RubyLLM::Message object with token usage data
  # @example Adding usage from an LLM response
- #   usage.add(llm_response.usage)
- def add(usage)
- @input_tokens += usage.input_tokens || 0
- @output_tokens += usage.output_tokens || 0
- @total_tokens += usage.total_tokens || 0
+ #   usage.add(llm_response)
+ def add(response)
+ return unless response.respond_to?(:input_tokens)
+
+ input = response.input_tokens || 0
+ output = response.output_tokens || 0
+
+ @input_tokens += input
+ @output_tokens += output
+ @total_tokens += input + output
  end
  end
  end
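A short behavioral sketch of the reworked add. The diff only shows the method body, so the Usage constant and its reader methods below are assumptions; the arithmetic mirrors the hunk above:

```ruby
# Minimal sketch: responses without token readers (e.g. RubyLLM::Tool::Halt)
# are ignored, and total_tokens is now derived as input + output.
LlmResponse = Struct.new(:input_tokens, :output_tokens, keyword_init: true) # stand-in for RubyLLM::Message

usage = Agents::RunContext::Usage.new # assumed constant name
usage.add(LlmResponse.new(input_tokens: 120, output_tokens: 35))
usage.add(Object.new)                 # no #input_tokens, so it is skipped
usage.total_tokens                    # => 155, assuming a total_tokens reader exists
```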
data/lib/agents/runner.rb CHANGED
@@ -1,5 +1,7 @@
  # frozen_string_literal: true

+ require "set"
+
  module Agents
  # The execution engine that orchestrates conversations between users and agents.
  # Runner manages the conversation flow, handles tool execution through RubyLLM,
@@ -118,6 +120,7 @@ module Agents
  chat.complete
  end
  response = result
+ track_usage(response, context_wrapper)

  # Check for handoff via RubyLLM's halt mechanism
  if response.is_a?(RubyLLM::Tool::Halt) && context_wrapper.context[:pending_handoff]
@@ -264,26 +267,98 @@ module Agents

  # Restores conversation history from context into RubyLLM chat.
  # Converts stored message hashes back into RubyLLM::Message objects with proper content handling.
+ # Supports user, assistant, and tool role messages for complete conversation continuity.
  #
  # @param chat [RubyLLM::Chat] The chat instance to restore history into
  # @param context_wrapper [RunContext] Context containing conversation history
  def restore_conversation_history(chat, context_wrapper)
  history = context_wrapper.context[:conversation_history] || []
+ valid_tool_call_ids = Set.new

  history.each do |msg|
- # Only restore user and assistant messages with content
- next unless %i[user assistant].include?(msg[:role].to_sym)
- next unless msg[:content] && !Helpers::MessageExtractor.content_empty?(msg[:content])
+ next unless restorable_message?(msg)

- # Extract text content safely - handle both string and hash content
- content = RubyLLM::Content.new(msg[:content])
+ if msg[:role].to_sym == :tool &&
+ msg[:tool_call_id] &&
+ !valid_tool_call_ids.include?(msg[:tool_call_id])
+ Agents.logger&.warn("Skipping tool message without matching assistant tool_call_id #{msg[:tool_call_id]}")
+ next
+ end

- # Create a proper RubyLLM::Message and pass it to add_message
- message = RubyLLM::Message.new(
- role: msg[:role].to_sym,
- content: content
- )
+ message_params = build_message_params(msg)
+ next unless message_params # Skip invalid messages
+
+ message = RubyLLM::Message.new(**message_params)
  chat.add_message(message)
+
+ if message.role == :assistant && message_params[:tool_calls]
+ valid_tool_call_ids.merge(message_params[:tool_calls].keys)
+ end
+ end
+ end
+
+ # Check if a message should be restored
+ def restorable_message?(msg)
+ role = msg[:role].to_sym
+ return false unless %i[user assistant tool].include?(role)
+
+ # Allow assistant messages that only contain tool calls (no text content)
+ tool_calls_present = role == :assistant && msg[:tool_calls] && !msg[:tool_calls].empty?
+ return false if role != :tool && !tool_calls_present &&
+ Helpers::MessageExtractor.content_empty?(msg[:content])
+
+ true
+ end
+
+ # Build message parameters for restoration
+ def build_message_params(msg)
+ role = msg[:role].to_sym
+
+ content_value = msg[:content]
+ # Assistant tool-call messages may have empty text, but still need placeholder content
+ content_value = "" if content_value.nil? && role == :assistant && msg[:tool_calls]&.any?
+
+ params = {
+ role: role,
+ content: RubyLLM::Content.new(content_value)
+ }
+
+ # Handle tool-specific parameters (Tool Results)
+ if role == :tool
+ return nil unless valid_tool_message?(msg)
+
+ params[:tool_call_id] = msg[:tool_call_id]
+ end
+
+ # FIX: Restore tool_calls on assistant messages
+ # This is required by OpenAI/Anthropic API contracts to link
+ # subsequent tool result messages back to this request.
+ if role == :assistant && msg[:tool_calls] && !msg[:tool_calls].empty?
+ # Convert stored array of hashes back into the Hash format RubyLLM expects
+ # RubyLLM stores tool_calls as: { call_id => ToolCall_object, ... }
+ # Reference: openai/tools.rb:35 uses hash iteration |_, tc|
+ params[:tool_calls] = msg[:tool_calls].each_with_object({}) do |tc, hash|
+ tool_call_id = tc[:id] || tc["id"]
+ next unless tool_call_id
+
+ hash[tool_call_id] = RubyLLM::ToolCall.new(
+ id: tool_call_id,
+ name: tc[:name] || tc["name"],
+ arguments: tc[:arguments] || tc["arguments"] || {}
+ )
+ end
+ end
+
+ params
+ end
+
+ # Validate tool message has required tool_call_id
+ def valid_tool_message?(msg)
+ if msg[:tool_call_id]
+ true
+ else
+ Agents.logger&.warn("Skipping tool message without tool_call_id in conversation history")
+ false
  end
  end
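Taken together with the extractor changes earlier in this diff, a persisted conversation that includes tool use now round-trips. A sketch of a context payload that restores cleanly under the logic above (values are hypothetical; a :tool entry whose tool_call_id was never announced by a preceding assistant message would be logged and skipped instead):

```ruby
# Hypothetical context handed back to the runner on a later turn.
# The assistant entry registers "call_1", so the :tool entry that follows is
# restored with its tool_call_id intact and a rebuilt RubyLLM::ToolCall.
context = {
  conversation_history: [
    { role: :user, content: "Where is order 42?" },
    { role: :assistant, content: "", agent_name: "Support Agent",
      tool_calls: [{ id: "call_1", name: "lookup_order", arguments: { "order_id" => "42" } }] },
    { role: :tool, content: "shipped", tool_call_id: "call_1" },
    { role: :assistant, content: "Order 42 has shipped.", agent_name: "Support Agent" }
  ]
}
```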
@@ -340,6 +415,12 @@ module Agents
  chat.with_headers(**headers)
  end

+ def track_usage(response, context_wrapper)
+ return unless context_wrapper&.usage
+
+ context_wrapper.usage.add(response)
+ end
+
  # Builds thread-safe tool wrappers for an agent's tools and handoff tools.
  #
  # @param agent [Agents::Agent] The agent whose tools to wrap
@@ -73,6 +73,15 @@ module Agents
  @tool.parameters
  end

+ # Expose params schema for RubyLLM providers that expect it
+ def params_schema
+ @tool.respond_to?(:params_schema) ? @tool.params_schema : nil
+ end
+
+ def provider_params
+ @tool.respond_to?(:provider_params) ? @tool.provider_params : {}
+ end
+
  # Make this work with RubyLLM's tool calling
  def to_s
  name
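The two new readers are defensive delegations, so a tool written before these RubyLLM hooks existed still wraps cleanly. A self-contained sketch of the same pattern (the class names here are illustrative stand-ins, not the gem's actual wrapper):

```ruby
# Illustrative only: a tool that does not implement the newer hooks still
# answers params_schema / provider_params through the fallback values.
class LegacyLookupTool
  def parameters
    {}
  end
end

class WrapperSketch
  def initialize(tool)
    @tool = tool
  end

  def params_schema
    @tool.respond_to?(:params_schema) ? @tool.params_schema : nil
  end

  def provider_params
    @tool.respond_to?(:provider_params) ? @tool.provider_params : {}
  end
end

WrapperSketch.new(LegacyLookupTool.new).params_schema   # => nil
WrapperSketch.new(LegacyLookupTool.new).provider_params # => {}
```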
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module Agents
- VERSION = "0.7.0"
+ VERSION = "0.8.0"
  end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: ai-agents
  version: !ruby/object:Gem::Version
- version: 0.7.0
+ version: 0.8.0
  platform: ruby
  authors:
  - Shivam Mishra
@@ -15,14 +15,14 @@ dependencies:
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: 1.8.2
+ version: 1.9.1
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: 1.8.2
+ version: 1.9.1
  description: Ruby AI Agents SDK enables creating complex AI workflows with multi-agent
  orchestration, tool execution, safety guardrails, and provider-agnostic LLM integration.
  email: