ruby_llm-agents 0.3.0 → 0.3.3

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
@@ -50,6 +50,7 @@ module RubyLLM
  # @param kwargs [Hash] Named parameters for the agent
  # @option kwargs [Boolean] :dry_run Return prompt info without API call
  # @option kwargs [Boolean] :skip_cache Bypass caching even if enabled
+ # @option kwargs [String, Array<String>] :with Attachments (files, URLs) to send with the prompt
  # @yield [chunk] Yields chunks when streaming is enabled
  # @yieldparam chunk [RubyLLM::Chunk] A streaming chunk with content
  # @return [Object] The processed response from the agent
@@ -64,6 +65,10 @@ module RubyLLM
  # ChatAgent.call(message: "Hello") do |chunk|
  #   print chunk.content
  # end
+ #
+ # @example With attachments
+ #   VisionAgent.call(query: "Describe this image", with: "photo.jpg")
+ #   VisionAgent.call(query: "Compare these", with: ["a.png", "b.png"])
  def call(*args, **kwargs, &block)
    new(*args, **kwargs).call(&block)
  end
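
For orientation, a hedged sketch of the class-level call interface documented above; ChatAgent and its :message parameter come from the @example, and the flags are the documented :dry_run, :skip_cache, and :with options (the attachment path is illustrative):

  ChatAgent.call(message: "Hello", dry_run: true)     # prompt info only, no API call
  ChatAgent.call(message: "Hello", skip_cache: true)  # bypasses caching even if enabled
  ChatAgent.call(message: "Hello", with: "notes.pdf") do |chunk|
    print chunk.content  # chunks are yielded only when streaming is enabled
  end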
@@ -320,7 +325,9 @@ module RubyLLM
  # @return [RubyLLM::Chat] The configured RubyLLM client
  # @!attribute [r] time_to_first_token_ms
  # @return [Integer, nil] Time to first token in milliseconds (streaming only)
- attr_reader :model, :temperature, :client, :time_to_first_token_ms
+ # @!attribute [r] accumulated_tool_calls
+ # @return [Array<Hash>] Tool calls accumulated during execution
+ attr_reader :model, :temperature, :client, :time_to_first_token_ms, :accumulated_tool_calls

  # Creates a new agent instance
  #
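
The new reader is also usable on an agent instance after execution; a minimal sketch, assuming a hypothetical MyAgent with a :query parameter:

  agent = MyAgent.new(query: "test")
  agent.call
  agent.accumulated_tool_calls  # => serialized tool call hashes, or [] when no tools ran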
@@ -332,6 +339,7 @@ module RubyLLM
  @model = model
  @temperature = temperature
  @options = options
+ @accumulated_tool_calls = []
  validate_required_params!
  @client = build_client
  end
@@ -419,20 +427,26 @@ module RubyLLM

  # Returns prompt info without making an API call (debug mode)
  #
- # @return [Hash] Agent configuration and prompt info
+ # @return [Result] A Result with dry run configuration info
  def dry_run_response
- {
- dry_run: true,
- agent: self.class.name,
- model: model,
+ Result.new(
+ content: {
+ dry_run: true,
+ agent: self.class.name,
+ model: model,
+ temperature: temperature,
+ timeout: self.class.timeout,
+ system_prompt: system_prompt,
+ user_prompt: user_prompt,
+ attachments: @options[:with],
+ schema: schema&.class&.name,
+ streaming: self.class.streaming,
+ tools: self.class.tools.map { |t| t.respond_to?(:name) ? t.name : t.to_s }
+ },
+ model_id: model,
  temperature: temperature,
- timeout: self.class.timeout,
- system_prompt: system_prompt,
- user_prompt: user_prompt,
- schema: schema&.class&.name,
- streaming: self.class.streaming,
- tools: self.class.tools.map { |t| t.respond_to?(:name) ? t.name : t.to_s }
- }
+ streaming: self.class.streaming
+ )
  end

  private
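
Because dry_run_response now wraps its payload in a Result instead of returning a raw Hash, hash-style access keeps working through the Result's delegation to content (defined in the new result.rb further down). A hedged sketch, reusing the ChatAgent example:

  result = ChatAgent.call(message: "Hello", dry_run: true)
  result.content[:dry_run]  # => true
  result[:system_prompt]    # [] is delegated to content, so existing hash-style callers keep working
  result.content[:tools]    # => tool names resolved via t.name or t.to_s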
@@ -456,17 +470,20 @@ module RubyLLM
  #
  # @param model_override [String, nil] Optional model to use instead of default
  # @yield [chunk] Yields chunks when streaming is enabled
- # @return [Object] The processed response
+ # @return [Result] A Result object with processed content and metadata
  def execute_single_attempt(model_override: nil, &block)
  current_client = model_override ? build_client_with_model(model_override) : client
  @execution_started_at ||= Time.current
+ reset_accumulated_tool_calls!

  Timeout.timeout(self.class.timeout) do
  if self.class.streaming && block_given?
  execute_with_streaming(current_client, &block)
  else
- response = current_client.ask(user_prompt)
- process_response(capture_response(response))
+ response = current_client.ask(user_prompt, **ask_options)
+ extract_tool_calls_from_client(current_client)
+ capture_response(response)
+ build_result(process_response(response), response)
  end
  end
  end
@@ -479,11 +496,11 @@ module RubyLLM
  # @param current_client [RubyLLM::Chat] The configured client
  # @yield [chunk] Yields each chunk as it arrives
  # @yieldparam chunk [RubyLLM::Chunk] A streaming chunk
- # @return [Object] The processed response
+ # @return [Result] A Result object with processed content and metadata
  def execute_with_streaming(current_client, &block)
  first_chunk_at = nil

- response = current_client.ask(user_prompt) do |chunk|
+ response = current_client.ask(user_prompt, **ask_options) do |chunk|
  first_chunk_at ||= Time.current
  yield chunk if block_given?
  end
@@ -492,7 +509,9 @@ module RubyLLM
  @time_to_first_token_ms = ((first_chunk_at - @execution_started_at) * 1000).to_i
  end

- process_response(capture_response(response))
+ extract_tool_calls_from_client(current_client)
+ capture_response(response)
+ build_result(process_response(response), response)
  end

  # Executes the agent with retry/fallback/circuit breaker support
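
With streaming enabled, chunks are still yielded to the caller's block and the final return value is now a Result carrying the timing captured above. A minimal sketch, assuming a hypothetical StreamingAgent with a :message parameter:

  result = StreamingAgent.call(message: "Tell me a story") do |chunk|
    print chunk.content
  end
  result.streaming?              # => true
  result.time_to_first_token_ms  # ms from execution start to the first chunk, as computed above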
@@ -692,7 +711,18 @@ module RubyLLM
  #
  # @return [Hash] Data to hash for cache key
  def cache_key_data
- @options.except(:skip_cache, :dry_run)
+ @options.except(:skip_cache, :dry_run, :with)
+ end
+
+ # Returns options to pass to the ask method
+ #
+ # Currently supports :with for attachments (images, PDFs, etc.)
+ #
+ # @return [Hash] Options for the ask call
+ def ask_options
+ opts = {}
+ opts[:with] = @options[:with] if @options[:with]
+ opts
  end

  # Validates that all required parameters are present
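
Two consequences of this hunk: attachments are forwarded to RubyLLM's ask via ask_options, and :with is excluded from cache_key_data, so calls that differ only in their attachments hash to the same cache key. A hedged sketch, assuming a hypothetical ReportAgent with caching enabled:

  ReportAgent.call(query: "Summarize this", with: "q1.pdf")
  ReportAgent.call(query: "Summarize this", with: "q2.pdf")
  # Both calls share cache_key_data ({ query: "Summarize this" }), so the second
  # may be served from cache; passing skip_cache: true forces a fresh request.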
@@ -734,6 +764,183 @@ module RubyLLM
  client.with_message(message[:role], message[:content])
  end
  end
+
+ # @!group Result Building
+
+ # Builds a Result object from processed content and response metadata
+ #
+ # @param content [Hash, String] The processed response content
+ # @param response [RubyLLM::Message] The raw LLM response
+ # @return [Result] A Result object with full execution metadata
+ def build_result(content, response)
+ completed_at = Time.current
+ input_tokens = result_response_value(response, :input_tokens)
+ output_tokens = result_response_value(response, :output_tokens)
+ response_model_id = result_response_value(response, :model_id)
+
+ Result.new(
+ content: content,
+ input_tokens: input_tokens,
+ output_tokens: output_tokens,
+ cached_tokens: result_response_value(response, :cached_tokens, 0),
+ cache_creation_tokens: result_response_value(response, :cache_creation_tokens, 0),
+ model_id: model,
+ chosen_model_id: response_model_id || model,
+ temperature: temperature,
+ started_at: @execution_started_at,
+ completed_at: completed_at,
+ duration_ms: result_duration_ms(completed_at),
+ time_to_first_token_ms: @time_to_first_token_ms,
+ finish_reason: result_finish_reason(response),
+ streaming: self.class.streaming,
+ input_cost: result_input_cost(input_tokens, response_model_id),
+ output_cost: result_output_cost(output_tokens, response_model_id),
+ total_cost: result_total_cost(input_tokens, output_tokens, response_model_id),
+ tool_calls: @accumulated_tool_calls,
+ tool_calls_count: @accumulated_tool_calls.size
+ )
+ end
+
+ # Safely extracts a value from the response object
+ #
+ # @param response [Object] The response object
+ # @param method [Symbol] The method to call
+ # @param default [Object] Default value if method doesn't exist
+ # @return [Object] The extracted value or default
+ def result_response_value(response, method, default = nil)
+ return default unless response.respond_to?(method)
+ response.send(method) || default
+ end
+
+ # Calculates execution duration in milliseconds
+ #
+ # @param completed_at [Time] When execution completed
+ # @return [Integer, nil] Duration in ms or nil
+ def result_duration_ms(completed_at)
+ return nil unless @execution_started_at
+ ((completed_at - @execution_started_at) * 1000).to_i
+ end
+
+ # Extracts finish reason from response
+ #
+ # @param response [Object] The response object
+ # @return [String, nil] Normalized finish reason
+ def result_finish_reason(response)
+ reason = result_response_value(response, :finish_reason) ||
+ result_response_value(response, :stop_reason)
+ return nil unless reason
+
+ # Normalize to standard values
+ case reason.to_s.downcase
+ when "stop", "end_turn" then "stop"
+ when "length", "max_tokens" then "length"
+ when "content_filter", "safety" then "content_filter"
+ when "tool_calls", "tool_use" then "tool_calls"
+ else "other"
+ end
+ end
+
+ # Calculates input cost from tokens
+ #
+ # @param input_tokens [Integer, nil] Number of input tokens
+ # @param response_model_id [String, nil] Model that responded
+ # @return [Float, nil] Input cost in USD
+ def result_input_cost(input_tokens, response_model_id)
+ return nil unless input_tokens
+ model_info = result_model_info(response_model_id)
+ return nil unless model_info&.pricing
+ price = model_info.pricing.text_tokens&.input || 0
+ (input_tokens / 1_000_000.0 * price).round(6)
+ end
+
+ # Calculates output cost from tokens
+ #
+ # @param output_tokens [Integer, nil] Number of output tokens
+ # @param response_model_id [String, nil] Model that responded
+ # @return [Float, nil] Output cost in USD
+ def result_output_cost(output_tokens, response_model_id)
+ return nil unless output_tokens
+ model_info = result_model_info(response_model_id)
+ return nil unless model_info&.pricing
+ price = model_info.pricing.text_tokens&.output || 0
+ (output_tokens / 1_000_000.0 * price).round(6)
+ end
+
+ # Calculates total cost from tokens
+ #
+ # @param input_tokens [Integer, nil] Number of input tokens
+ # @param output_tokens [Integer, nil] Number of output tokens
+ # @param response_model_id [String, nil] Model that responded
+ # @return [Float, nil] Total cost in USD
+ def result_total_cost(input_tokens, output_tokens, response_model_id)
+ input_cost = result_input_cost(input_tokens, response_model_id)
+ output_cost = result_output_cost(output_tokens, response_model_id)
+ return nil unless input_cost || output_cost
+ ((input_cost || 0) + (output_cost || 0)).round(6)
+ end
+
+ # Resolves model info for cost calculation
+ #
+ # @param response_model_id [String, nil] Model ID from response
+ # @return [Object, nil] Model info or nil
+ def result_model_info(response_model_id)
+ lookup_id = response_model_id || model
+ return nil unless lookup_id
+ model_obj, _provider = RubyLLM::Models.resolve(lookup_id)
+ model_obj
+ rescue StandardError
+ nil
+ end
+
+ # @!endgroup
+
+ # @!group Tool Call Tracking
+
+ # Resets accumulated tool calls for a new execution
+ #
+ # @return [void]
+ def reset_accumulated_tool_calls!
+ @accumulated_tool_calls = []
+ end
+
+ # Extracts tool calls from all assistant messages in the conversation
+ #
+ # RubyLLM handles tool call loops internally. After ask() completes,
+ # the conversation history contains all intermediate assistant messages
+ # that had tool_calls. This method extracts those tool calls.
+ #
+ # @param client [RubyLLM::Chat] The chat client with conversation history
+ # @return [void]
+ def extract_tool_calls_from_client(client)
+ return unless client.respond_to?(:messages)
+
+ client.messages.each do |message|
+ next unless message.role == :assistant
+ next unless message.respond_to?(:tool_calls) && message.tool_calls.present?
+
+ message.tool_calls.each_value do |tool_call|
+ @accumulated_tool_calls << serialize_tool_call(tool_call)
+ end
+ end
+ end
+
+ # Serializes a single tool call to a hash
+ #
+ # @param tool_call [Object] The tool call object
+ # @return [Hash] Serialized tool call
+ def serialize_tool_call(tool_call)
+ if tool_call.respond_to?(:to_h)
+ tool_call.to_h.transform_keys(&:to_s)
+ else
+ {
+ "id" => tool_call.id,
+ "name" => tool_call.name,
+ "arguments" => tool_call.arguments
+ }
+ end
+ end
+
+ # @!endgroup
  end
  end
  end
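
The cost helpers price tokens per million against the resolved model's pricing, and the accumulated tool calls land on the Result. A hedged sketch of what that exposes, with a hypothetical ResearchAgent and illustrative numbers:

  result = ResearchAgent.call(query: "Latest numbers?")
  result.tool_calls        # => e.g. [{ "id" => "call_1", "name" => "web_search", "arguments" => { "query" => "..." } }]
  result.tool_calls_count  # => 1
  # Cost math mirrors result_input_cost: tokens / 1_000_000.0 * per-million price, rounded to 6 places,
  # e.g. 1_200 input tokens at $3 per million => (1_200 / 1_000_000.0 * 3.0).round(6) # => 0.0036
  result.total_cost        # => (input_cost + output_cost).round(6), or nil when pricing is unknown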
@@ -385,6 +385,12 @@ module RubyLLM
  update_data[:response] = redacted_response(@last_response)
  end

+ # Add tool calls from accumulated_tool_calls (captured from all responses)
+ if respond_to?(:accumulated_tool_calls) && accumulated_tool_calls.present?
+ update_data[:tool_calls] = accumulated_tool_calls
+ update_data[:tool_calls_count] = accumulated_tool_calls.size
+ end
+
  # Add error data if failed
  if error
  update_data.merge!(
@@ -566,7 +572,11 @@ module RubyLLM
  # @param response [RubyLLM::Message, nil] The LLM response
  # @return [Hash] Extracted response data (empty if response invalid)
  def safe_extract_response_data(response)
- return {} unless response.is_a?(RubyLLM::Message)
+ return {} unless response.respond_to?(:input_tokens)
+
+ # Use accumulated_tool_calls which captures tool calls from ALL responses
+ # during multi-turn conversations (when tools are used)
+ tool_calls_data = respond_to?(:accumulated_tool_calls) ? accumulated_tool_calls : []

  {
  input_tokens: safe_response_value(response, :input_tokens),
@@ -575,7 +585,9 @@ module RubyLLM
  cache_creation_tokens: safe_response_value(response, :cache_creation_tokens, 0),
  model_id: safe_response_value(response, :model_id),
  finish_reason: safe_extract_finish_reason(response),
- response: safe_serialize_response(response)
+ response: safe_serialize_response(response),
+ tool_calls: tool_calls_data || [],
+ tool_calls_count: tool_calls_data&.size || 0
  }.compact
  end

@@ -689,16 +701,37 @@ module RubyLLM
  # @param response [RubyLLM::Message] The LLM response
  # @return [Hash] Serialized response data
  def safe_serialize_response(response)
+ # Use accumulated_tool_calls which captures tool calls from ALL responses
+ tool_calls_data = respond_to?(:accumulated_tool_calls) ? accumulated_tool_calls : nil
+
  {
  content: safe_response_value(response, :content),
  model_id: safe_response_value(response, :model_id),
  input_tokens: safe_response_value(response, :input_tokens),
  output_tokens: safe_response_value(response, :output_tokens),
  cached_tokens: safe_response_value(response, :cached_tokens, 0),
- cache_creation_tokens: safe_response_value(response, :cache_creation_tokens, 0)
+ cache_creation_tokens: safe_response_value(response, :cache_creation_tokens, 0),
+ tool_calls: tool_calls_data.presence
  }.compact
  end

+ # Serializes tool calls to an array of hashes for storage
+ #
+ # @param response [RubyLLM::Message] The LLM response
+ # @return [Array<Hash>, nil] Serialized tool calls or nil if none
+ def serialize_tool_calls(response)
+ tool_calls = safe_response_value(response, :tool_calls)
+ return nil if tool_calls.nil? || tool_calls.empty?
+
+ tool_calls.map do |id, tool_call|
+ if tool_call.respond_to?(:to_h)
+ tool_call.to_h
+ else
+ { id: id, name: tool_call[:name], arguments: tool_call[:arguments] }
+ end
+ end
+ end
+
  # Emergency fallback to mark execution as failed
  #
  # Uses update_all to bypass ActiveRecord callbacks and validations,
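
serialize_tool_calls above flattens a message's id => tool_call hash into an array; a rough illustration of its fallback branch, using plain hashes in place of RubyLLM's tool call objects:

  tool_calls = { "call_abc" => { name: "web_search", arguments: { "query" => "ruby llm" } } }
  tool_calls.map { |id, tc| { id: id, name: tc[:name], arguments: tc[:arguments] } }
  # => [{ id: "call_abc", name: "web_search", arguments: { "query" => "ruby llm" } }]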
@@ -108,7 +108,7 @@ module RubyLLM
  redact_string(value, config)
  when defined?(ActiveRecord::Base) && ActiveRecord::Base
  # Convert ActiveRecord objects to safe references
- { id: value.id, type: value.class.name }
+ { id: value&.id, type: value&.class&.name }
  else
  value
  end
@@ -0,0 +1,235 @@
+ # frozen_string_literal: true
+
+ module RubyLLM
+ module Agents
+ # Wrapper for agent execution results with full metadata
+ #
+ # Provides access to the response content along with execution details
+ # like token usage, cost, timing, and model information.
+ #
+ # @example Basic usage
+ #   result = MyAgent.call(query: "test")
+ #   result.content # => processed response
+ #   result.input_tokens # => 150
+ #   result.total_cost # => 0.00025
+ #
+ # @example Backward compatible hash access
+ #   result[:key] # delegates to result.content[:key]
+ #   result.dig(:nested, :key)
+ #
+ # @api public
+ class Result
+ extend ActiveSupport::Delegation
+
+ # @!attribute [r] content
+ # @return [Hash, String] The processed response content
+ attr_reader :content
+
+ # @!group Token Usage
+ # @!attribute [r] input_tokens
+ # @return [Integer, nil] Number of input tokens consumed
+ # @!attribute [r] output_tokens
+ # @return [Integer, nil] Number of output tokens generated
+ # @!attribute [r] cached_tokens
+ # @return [Integer] Number of tokens served from cache
+ # @!attribute [r] cache_creation_tokens
+ # @return [Integer] Number of tokens used to create cache
+ attr_reader :input_tokens, :output_tokens, :cached_tokens, :cache_creation_tokens
+
+ # @!group Cost
+ # @!attribute [r] input_cost
+ # @return [Float, nil] Cost of input tokens in USD
+ # @!attribute [r] output_cost
+ # @return [Float, nil] Cost of output tokens in USD
+ # @!attribute [r] total_cost
+ # @return [Float, nil] Total cost in USD
+ attr_reader :input_cost, :output_cost, :total_cost
+
+ # @!group Model Info
+ # @!attribute [r] model_id
+ # @return [String, nil] The model that was requested
+ # @!attribute [r] chosen_model_id
+ # @return [String, nil] The model that actually responded (may differ if fallback used)
+ # @!attribute [r] temperature
+ # @return [Float, nil] Temperature setting used
+ attr_reader :model_id, :chosen_model_id, :temperature
+
+ # @!group Timing
+ # @!attribute [r] started_at
+ # @return [Time, nil] When execution started
+ # @!attribute [r] completed_at
+ # @return [Time, nil] When execution completed
+ # @!attribute [r] duration_ms
+ # @return [Integer, nil] Execution duration in milliseconds
+ # @!attribute [r] time_to_first_token_ms
+ # @return [Integer, nil] Time to first token (streaming only)
+ attr_reader :started_at, :completed_at, :duration_ms, :time_to_first_token_ms
+
+ # @!group Status
+ # @!attribute [r] finish_reason
+ # @return [String, nil] Why generation stopped (stop, length, tool_calls, etc.)
+ # @!attribute [r] streaming
+ # @return [Boolean] Whether streaming was enabled
+ attr_reader :finish_reason, :streaming
+
+ # @!group Error Info
+ # @!attribute [r] error_class
+ # @return [String, nil] Exception class name if failed
+ # @!attribute [r] error_message
+ # @return [String, nil] Exception message if failed
+ attr_reader :error_class, :error_message
+
+ # @!group Reliability
+ # @!attribute [r] attempts
+ # @return [Array<Hash>] Details of each attempt (for retries/fallbacks)
+ # @!attribute [r] attempts_count
+ # @return [Integer] Number of attempts made
+ attr_reader :attempts, :attempts_count
+
+ # @!group Tool Calls
+ # @!attribute [r] tool_calls
+ # @return [Array<Hash>] Tool calls made during execution
+ # @!attribute [r] tool_calls_count
+ # @return [Integer] Number of tool calls made
+ attr_reader :tool_calls, :tool_calls_count
+
+ # Creates a new Result instance
+ #
+ # @param content [Hash, String] The processed response content
+ # @param options [Hash] Execution metadata
+ def initialize(content:, **options)
+ @content = content
+
+ # Token usage
+ @input_tokens = options[:input_tokens]
+ @output_tokens = options[:output_tokens]
+ @cached_tokens = options[:cached_tokens] || 0
+ @cache_creation_tokens = options[:cache_creation_tokens] || 0
+
+ # Cost
+ @input_cost = options[:input_cost]
+ @output_cost = options[:output_cost]
+ @total_cost = options[:total_cost]
+
+ # Model info
+ @model_id = options[:model_id]
+ @chosen_model_id = options[:chosen_model_id] || options[:model_id]
+ @temperature = options[:temperature]
+
+ # Timing
+ @started_at = options[:started_at]
+ @completed_at = options[:completed_at]
+ @duration_ms = options[:duration_ms]
+ @time_to_first_token_ms = options[:time_to_first_token_ms]
+
+ # Status
+ @finish_reason = options[:finish_reason]
+ @streaming = options[:streaming] || false
+
+ # Error
+ @error_class = options[:error_class]
+ @error_message = options[:error_message]
+
+ # Reliability
+ @attempts = options[:attempts] || []
+ @attempts_count = options[:attempts_count] || 1
+
+ # Tool calls
+ @tool_calls = options[:tool_calls] || []
+ @tool_calls_count = options[:tool_calls_count] || 0
+ end
+
+ # Returns total tokens (input + output)
+ #
+ # @return [Integer] Total token count
+ def total_tokens
+ (input_tokens || 0) + (output_tokens || 0)
+ end
+
+ # Returns whether streaming was enabled
+ #
+ # @return [Boolean] true if streaming was used
+ def streaming?
+ streaming == true
+ end
+
+ # Returns whether the execution succeeded
+ #
+ # @return [Boolean] true if no error occurred
+ def success?
+ error_class.nil?
+ end
+
+ # Returns whether the execution failed
+ #
+ # @return [Boolean] true if an error occurred
+ def error?
+ !success?
+ end
+
+ # Returns whether a fallback model was used
+ #
+ # @return [Boolean] true if chosen_model_id differs from model_id
+ def used_fallback?
+ chosen_model_id.present? && chosen_model_id != model_id
+ end
+
+ # Returns whether the response was truncated due to max tokens
+ #
+ # @return [Boolean] true if finish_reason is "length"
+ def truncated?
+ finish_reason == "length"
+ end
+
+ # Returns whether tool calls were made during execution
+ #
+ # @return [Boolean] true if tool_calls_count > 0
+ def has_tool_calls?
+ tool_calls_count.to_i > 0
+ end
+
+ # Converts the result to a hash
+ #
+ # @return [Hash] All result data as a hash
+ def to_h
+ {
+ content: content,
+ input_tokens: input_tokens,
+ output_tokens: output_tokens,
+ total_tokens: total_tokens,
+ cached_tokens: cached_tokens,
+ cache_creation_tokens: cache_creation_tokens,
+ input_cost: input_cost,
+ output_cost: output_cost,
+ total_cost: total_cost,
+ model_id: model_id,
+ chosen_model_id: chosen_model_id,
+ temperature: temperature,
+ started_at: started_at,
+ completed_at: completed_at,
+ duration_ms: duration_ms,
+ time_to_first_token_ms: time_to_first_token_ms,
+ finish_reason: finish_reason,
+ streaming: streaming,
+ error_class: error_class,
+ error_message: error_message,
+ attempts_count: attempts_count,
+ attempts: attempts,
+ tool_calls: tool_calls,
+ tool_calls_count: tool_calls_count
+ }
+ end
+
+ # Delegate hash methods to content for backward compatibility
+ delegate :[], :dig, :keys, :values, :each, :map, to: :content, allow_nil: true
+
+ # Custom to_json that returns content as JSON for backward compatibility
+ #
+ # @param args [Array] Arguments passed to to_json
+ # @return [String] JSON representation
+ def to_json(*args)
+ content.to_json(*args)
+ end
+ end
+ end
+ end
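
A short sketch of the conveniences the new Result exposes, reusing the MyAgent example from the class docs (values are illustrative):

  result = MyAgent.call(query: "test")
  result.success?        # => true when no error_class was recorded
  result.total_tokens    # input_tokens + output_tokens, with nil treated as 0
  result.used_fallback?  # true only when chosen_model_id differs from the requested model_id
  result.truncated?      # true when finish_reason == "length"
  result.to_h[:duration_ms]
  result.to_json         # serializes content only, for backward compatibility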
@@ -4,6 +4,6 @@ module RubyLLM
  module Agents
  # Current version of the RubyLLM::Agents gem
  # @return [String] Semantic version string
- VERSION = "0.3.0"
+ VERSION = "0.3.3"
  end
  end
@@ -10,6 +10,7 @@ require_relative "agents/circuit_breaker"
  require_relative "agents/budget_tracker"
  require_relative "agents/alert_manager"
  require_relative "agents/attempt_tracker"
+ require_relative "agents/result"
  require_relative "agents/inflections" if defined?(Rails)
  require_relative "agents/engine" if defined?(Rails::Engine)