activeagent 0.5.0 → 0.6.0rc2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,167 @@
+ # frozen_string_literal: true
+
+ module ActiveAgent
+   module GenerationProvider
+     module ErrorHandling
+       extend ActiveSupport::Concern
+       include ActiveSupport::Rescuable
+
+       included do
+         class_attribute :retry_on_errors, default: []
+         class_attribute :max_retries, default: 3
+         class_attribute :verbose_errors_enabled, default: false
+
+         # Use rescue_from for provider-specific error handling
+         rescue_from StandardError, with: :handle_generation_error
+       end
+
+       def with_error_handling
+         retries = 0
+         begin
+           yield
+         rescue => e
+           if should_retry?(e) && retries < max_retries
+             retries += 1
+             log_retry(e, retries) if verbose_errors?
+             sleep(retry_delay(retries))
+             retry
+           else
+             # Use rescue_with_handler from Rescuable
+             rescue_with_handler(e) || raise
+           end
+         end
+       end
+
+       protected
+
+       def should_retry?(error)
+         return false if retry_on_errors.empty?
+         retry_on_errors.any? { |klass| error.is_a?(klass) }
+       end
+
+       def retry_delay(attempt)
+         # Exponential backoff: 1s, 2s, 4s...
+         2 ** (attempt - 1)
+       end
+
+       def handle_generation_error(error)
+         error_message = format_error_message(error)
+         # Create new error with original backtrace preserved
+         new_error = ActiveAgent::GenerationProvider::Base::GenerationProviderError.new(error_message)
+         new_error.set_backtrace(error.backtrace) if error.respond_to?(:backtrace)
+
+         # Log detailed error if verbose mode is enabled
+         log_error_details(error) if verbose_errors?
+
+         # Instrument the error for LogSubscriber
+         instrument_error(error, new_error)
+
+         raise new_error
+       end
+
+       def format_error_message(error)
+         message = if error.respond_to?(:response)
+           error.response[:body]
+         elsif error.respond_to?(:message)
+           error.message
+         elsif error.respond_to?(:to_s)
+           error.to_s
+         else
+           "An unknown error occurred: #{error.class.name}"
+         end
+
+         # Include error class in verbose mode
+         if verbose_errors?
+           "[#{error.class.name}] #{message}"
+         else
+           message
+         end
+       end
+
+       def verbose_errors?
+         # Check multiple sources for verbose setting (in priority order)
+         # 1. Instance config (highest priority)
+         return true if @config&.dig("verbose_errors")
+
+         # 2. Class-level setting
+         return true if self.class.verbose_errors_enabled
+
+         # 3. ActiveAgent global configuration
+         if defined?(ActiveAgent) && ActiveAgent.respond_to?(:configuration)
+           return true if ActiveAgent.configuration.verbose_generation_errors?
+         end
+
+         # 4. Environment variable (lowest priority)
+         ENV["ACTIVE_AGENT_VERBOSE_ERRORS"] == "true"
+       end
+
+       def log_error_details(error)
+         logger = find_logger
+         return unless logger
+
+         logger.error "[ActiveAgent::GenerationProvider] Error: #{error.class.name}: #{error.message}"
+         if logger.respond_to?(:debug) && error.respond_to?(:backtrace)
+           logger.debug "Backtrace:\n #{error.backtrace&.first(10)&.join("\n ")}"
+         end
+       end
+
+       def log_retry(error, attempt)
+         logger = find_logger
+         return unless logger
+
+         message = "[ActiveAgent::GenerationProvider] Retry attempt #{attempt}/#{max_retries} after #{error.class.name}"
+         logger.info message
+       end
+
+       def find_logger
+         # Try multiple logger sources (in priority order)
+         # 1. Instance config
+         return @config["logger"] if @config&.dig("logger")
+
+         # 2. ActiveAgent configuration logger
+         if defined?(ActiveAgent) && ActiveAgent.respond_to?(:configuration)
+           config_logger = ActiveAgent.configuration.generation_provider_logger
+           return config_logger if config_logger
+         end
+
+         # 3. Rails logger
+         return Rails.logger if defined?(Rails) && Rails.logger
+
+         # 4. ActiveAgent::Base logger
+         return ActiveAgent::Base.logger if defined?(ActiveAgent::Base) && ActiveAgent::Base.respond_to?(:logger)
+
+         nil
+       end
+
+       def instrument_error(original_error, wrapped_error)
+         if defined?(ActiveSupport::Notifications)
+           ActiveSupport::Notifications.instrument("error.active_agent", {
+             error_class: original_error.class.name,
+             error_message: original_error.message,
+             wrapped_error: wrapped_error,
+             provider: self.class.name
+           })
+         end
+       end
+
+       module ClassMethods
+         def retry_on(*errors, max_attempts: 3, **options)
+           self.retry_on_errors = errors
+           self.max_retries = max_attempts
+
+           # Also register with rescue_from for more complex handling
+           errors.each do |error_class|
+             rescue_from error_class do |error|
+               # This will be caught by with_error_handling for retry logic
+               raise error
+             end
+           end
+         end
+
+         def enable_verbose_errors!
+           self.verbose_errors_enabled = true
+         end
+       end
+     end
+   end
+ end
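
The ErrorHandling concern gives provider classes class-level retry and verbose-error switches. A minimal sketch of how a provider could opt in; the ExampleProvider class, the Faraday error classes, and call_api are illustrative assumptions, not part of this diff:

class ExampleProvider < ActiveAgent::GenerationProvider::Base
  include ActiveAgent::GenerationProvider::ErrorHandling

  # Retry transient transport failures up to 5 times with exponential backoff
  retry_on Faraday::TimeoutError, Faraday::ConnectionFailed, max_attempts: 5
  enable_verbose_errors!

  def generate(prompt)
    @prompt = prompt
    with_error_handling do
      call_api(prompt) # hypothetical API call
    end
  end
end

Errors not listed in retry_on fall through to handle_generation_error, which re-raises them as GenerationProviderError with the original backtrace attached.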
@@ -0,0 +1,92 @@
+ # frozen_string_literal: true
+
+ require "active_support/log_subscriber"
+
+ module ActiveAgent
+   module GenerationProvider
+     # = Generation Provider \LogSubscriber
+     #
+     # Implements the ActiveSupport::LogSubscriber for logging notifications when
+     # generation providers make API calls and handle responses.
+     class LogSubscriber < ActiveSupport::LogSubscriber
+       # A generation request was made
+       def generate(event)
+         info do
+           provider = event.payload[:provider]
+           model = event.payload[:model]
+
+           if exception = event.payload[:exception_object]
+             "Failed generation with #{provider} model=#{model} error_class=#{exception.class} error_message=#{exception.message.inspect}"
+           else
+             "Generated response with #{provider} model=#{model} (#{event.duration.round(1)}ms)"
+           end
+         end
+
+         debug { event.payload[:prompt] } if event.payload[:prompt]
+       end
+       subscribe_log_level :generate, :debug
+
+       # Streaming chunk received
+       def stream_chunk(event)
+         debug do
+           provider = event.payload[:provider]
+           chunk_size = event.payload[:chunk_size]
+           "#{provider}: received stream chunk (#{chunk_size} bytes)"
+         end
+       end
+       subscribe_log_level :stream_chunk, :debug
+
+       # Tool/function call executed
+       def tool_call(event)
+         info do
+           tool_name = event.payload[:tool_name]
+           tool_id = event.payload[:tool_id]
+
+           if exception = event.payload[:exception_object]
+             "Failed tool call #{tool_name} id=#{tool_id} error=#{exception.class}"
+           else
+             "Executed tool call #{tool_name} id=#{tool_id} (#{event.duration.round(1)}ms)"
+           end
+         end
+       end
+       subscribe_log_level :tool_call, :debug
+
+       # Retry attempt
+       def retry(event)
+         warn do
+           provider = event.payload[:provider]
+           attempt = event.payload[:attempt]
+           max_attempts = event.payload[:max_attempts]
+           error_class = event.payload[:error_class]
+
+           "#{provider}: Retry attempt #{attempt}/#{max_attempts} after #{error_class}"
+         end
+       end
+       subscribe_log_level :retry, :warn
+
+       # Error occurred
+       def error(event)
+         error do
+           provider = event.payload[:provider]
+           error_class = event.payload[:error_class]
+           error_message = event.payload[:error_message]
+
+           "#{provider}: Error #{error_class} - #{error_message}"
+         end
+       end
+       subscribe_log_level :error, :error
+
+       # Use the logger configured for ActiveAgent::Base if available
+       def logger
+         if defined?(ActiveAgent::Base) && ActiveAgent::Base.respond_to?(:logger)
+           ActiveAgent::Base.logger
+         else
+           super
+         end
+       end
+     end
+   end
+ end
+
+ # Attach to active_agent.generation_provider namespace
+ ActiveAgent::GenerationProvider::LogSubscriber.attach_to :"active_agent.generation_provider"
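
Because the subscriber attaches to the active_agent.generation_provider namespace, any event instrumented as "<name>.active_agent.generation_provider" with the payload keys read above gets logged. A sketch of the retry event shape this subscriber expects; the emitting side is not part of this diff and the values are illustrative:

ActiveSupport::Notifications.instrument(
  "retry.active_agent.generation_provider",
  provider: "OpenAIProvider",
  attempt: 2,
  max_attempts: 3,
  error_class: "Faraday::TimeoutError"
)
# LogSubscriber#retry logs at warn level:
#   "OpenAIProvider: Retry attempt 2/3 after Faraday::TimeoutError"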
@@ -0,0 +1,107 @@
+ # frozen_string_literal: true
+
+ module ActiveAgent
+   module GenerationProvider
+     module MessageFormatting
+       extend ActiveSupport::Concern
+
+       def provider_messages(messages)
+         messages.map do |message|
+           format_message(message)
+         end
+       end
+
+       protected
+
+       def format_message(message)
+         base_message = {
+           role: convert_role(message.role),
+           content: format_content(message)
+         }
+
+         add_tool_fields(base_message, message)
+         add_metadata_fields(base_message, message)
+
+         base_message.compact
+       end
+
+       def convert_role(role)
+         # Default role conversion - override in provider for specific mappings
+         role.to_s
+       end
+
+       def format_content(message)
+         # Handle multimodal content
+         case message.content_type
+         when "image_url"
+           format_image_content(message)
+         when "multipart/mixed", "array"
+           format_multimodal_content(message)
+         else
+           message.content
+         end
+       end
+
+       def format_image_content(message)
+         # Default implementation - override in provider
+         message.content
+       end
+
+       def format_multimodal_content(message)
+         # Default implementation for multimodal content
+         if message.content.is_a?(Array)
+           message.content.map do |item|
+             format_content_item(item)
+           end
+         else
+           message.content
+         end
+       end
+
+       def format_content_item(item)
+         # Format individual content items in multimodal messages
+         # Override in provider for specific formatting
+         item
+       end
+
+       def add_tool_fields(base_message, message)
+         # Add tool-specific fields based on role
+         case message.role.to_s
+         when "assistant"
+           if message.action_requested && message.requested_actions.any?
+             base_message[:tool_calls] = format_tool_calls(message.requested_actions)
+           elsif message.raw_actions.present? && message.raw_actions.is_a?(Array)
+             base_message[:tool_calls] = message.raw_actions
+           end
+         when "tool"
+           base_message[:tool_call_id] = message.action_id if message.action_id
+           base_message[:name] = message.action_name if message.action_name
+         end
+       end
+
+       def add_metadata_fields(base_message, message)
+         # Override to add provider-specific metadata
+         # For example: message IDs, timestamps, etc.
+       end
+
+       def format_tool_calls(actions)
+         # Default implementation - override in provider for specific format
+         actions.map do |action|
+           format_single_tool_call(action)
+         end
+       end
+
+       def format_single_tool_call(action)
+         # Default tool call format (OpenAI style)
+         {
+           type: "function",
+           function: {
+             name: action.name,
+             arguments: action.params.is_a?(String) ? action.params : action.params.to_json
+           },
+           id: action.id
+         }
+       end
+     end
+   end
+ end
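
MessageFormatting supplies provider-neutral defaults and expects providers to override the hooks marked "override in provider". A hypothetical override, assuming a provider that renames the system role and wraps images in its own part shape; the class, role mapping, and image shape are invented for illustration:

class ExampleProvider < ActiveAgent::GenerationProvider::Base
  include ActiveAgent::GenerationProvider::MessageFormatting

  protected

  # Map ActiveAgent roles onto provider-specific role names
  def convert_role(role)
    { "system" => "developer" }.fetch(role.to_s, role.to_s)
  end

  # Emit the provider's own image part shape instead of the bare URL string
  def format_image_content(message)
    [ { type: "input_image", image_url: message.content } ]
  end
end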
@@ -1,5 +1,5 @@
  begin
-   gem "ruby-openai", "~> 8.1.0"
+   gem "ruby-openai", "~> 8.2.0"
    require "openai"
  rescue LoadError
    raise LoadError, "The 'ruby-openai' gem is required for OpenAIProvider. Please add it to your Gemfile and run `bundle install`."
@@ -9,17 +9,29 @@ require "active_agent/action_prompt/action"
  require_relative "base"
  require_relative "response"
  require_relative "responses_adapter"
+ require_relative "stream_processing"
+ require_relative "message_formatting"
+ require_relative "tool_management"

  module ActiveAgent
    module GenerationProvider
      class OpenAIProvider < Base
+       include StreamProcessing
+       include MessageFormatting
+       include ToolManagement
        def initialize(config)
          super
          @host = config["host"] || nil
          @access_token ||= config["api_key"] || config["access_token"] || OpenAI.configuration.access_token || ENV["OPENAI_ACCESS_TOKEN"]
          @organization_id = config["organization_id"] || OpenAI.configuration.organization_id || ENV["OPENAI_ORGANIZATION_ID"]
          @admin_token = config["admin_token"] || OpenAI.configuration.admin_token || ENV["OPENAI_ADMIN_TOKEN"]
-         @client = OpenAI::Client.new(access_token: @access_token, uri_base: @host, organization_id: @organization_id)
+         @client = OpenAI::Client.new(
+           access_token: @access_token,
+           uri_base: @host,
+           organization_id: @organization_id,
+           admin_token: @admin_token,
+           log_errors: Rails.env.development?
+         )

          @model_name = config["model"] || "gpt-4o-mini"
        end
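
The rebuilt client now forwards admin_token and enables ruby-openai's log_errors option only in development. A sketch of the config hash the initializer reads, using the string keys it looks up; how this hash is normally assembled (for example from the gem's YAML configuration) is outside this diff:

provider = ActiveAgent::GenerationProvider::OpenAIProvider.new(
  "api_key"         => ENV["OPENAI_ACCESS_TOKEN"],
  "host"            => nil,            # custom uri_base, e.g. a proxy or Azure endpoint
  "organization_id" => nil,
  "admin_token"     => nil,
  "model"           => "gpt-4o-mini"   # also the default when omitted
)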
@@ -27,84 +39,58 @@ module ActiveAgent
        def generate(prompt)
          @prompt = prompt

-         if @prompt.multimodal? || @prompt.content_type == "multipart/mixed"
-           responses_prompt(parameters: responses_parameters)
-         else
-           chat_prompt(parameters: prompt_parameters)
+         with_error_handling do
+           if @prompt.multimodal? || @prompt.content_type == "multipart/mixed"
+             responses_prompt(parameters: responses_parameters)
+           else
+             chat_prompt(parameters: prompt_parameters)
+           end
          end
-       rescue => e
-         error_message = e.respond_to?(:message) ? e.message : e.to_s
-         raise GenerationProviderError, error_message
        end

        def embed(prompt)
          @prompt = prompt

-         embeddings_prompt(parameters: embeddings_parameters)
-       rescue => e
-         error_message = e.respond_to?(:message) ? e.message : e.to_s
-         raise GenerationProviderError, error_message
+         with_error_handling do
+           embeddings_prompt(parameters: embeddings_parameters)
+         end
        end

-       private
-
-       def provider_stream
-         agent_stream = prompt.options[:stream]
-
-         message = ActiveAgent::ActionPrompt::Message.new(content: "", role: :assistant)
-
-         @response = ActiveAgent::GenerationProvider::Response.new(prompt:, message:)
-         proc do |chunk, bytesize|
-           new_content = chunk.dig("choices", 0, "delta", "content")
-           if new_content && !new_content.blank?
-             message.generation_id = chunk.dig("id")
-             message.content += new_content
-
-             agent_stream.call(message, new_content, false, prompt.action_name) do |message, new_content|
-               yield message, new_content if block_given?
-             end
-           elsif chunk.dig("choices", 0, "delta", "tool_calls") && chunk.dig("choices", 0, "delta", "role")
-             message = handle_message(chunk.dig("choices", 0, "delta"))
-             prompt.messages << message
-             @response = ActiveAgent::GenerationProvider::Response.new(prompt:, message:)
-           end
+       protected
+
+       # Override from StreamProcessing module
+       def process_stream_chunk(chunk, message, agent_stream)
+         new_content = chunk.dig("choices", 0, "delta", "content")
+         if new_content && !new_content.blank?
+           message.generation_id = chunk.dig("id")
+           message.content += new_content
+           # Call agent_stream directly without the block to avoid double execution
+           agent_stream&.call(message, new_content, false, prompt.action_name)
+         elsif chunk.dig("choices", 0, "delta", "tool_calls") && chunk.dig("choices", 0, "delta", "role")
+           message = handle_message(chunk.dig("choices", 0, "delta"))
+           prompt.messages << message
+           @response = ActiveAgent::GenerationProvider::Response.new(prompt:, message:)
+         end

-           agent_stream.call(message, nil, true, prompt.action_name) do |message|
-             yield message, nil if block_given?
-           end
+         if chunk.dig("choices", 0, "finish_reason")
+           finalize_stream(message, agent_stream)
          end
        end

-       def prompt_parameters(model: @prompt.options[:model] || @model_name, messages: @prompt.messages, temperature: @prompt.options[:temperature] || @config["temperature"] || 0.7, tools: @prompt.actions)
-         {
-           model: model,
-           messages: provider_messages(messages),
-           temperature: temperature,
-           max_tokens: @prompt.options[:max_tokens] || @config["max_tokens"],
-           tools: tools.presence
-         }.compact
+       # Override from MessageFormatting module to handle OpenAI image format
+       def format_image_content(message)
+         [ {
+           type: "image_url",
+           image_url: { url: message.content }
+         } ]
        end

-       def provider_messages(messages)
-         messages.map do |message|
-           provider_message = {
-             role: message.role,
-             tool_call_id: message.action_id.presence,
-             name: message.action_name.presence,
-             tool_calls: message.raw_actions.present? ? message.raw_actions[:tool_calls] : (message.requested_actions.map { |action| { type: "function", name: action.name, arguments: action.params.to_json } } if message.action_requested),
-             generation_id: message.generation_id,
-             content: message.content,
-             type: message.content_type,
-             charset: message.charset
-           }.compact
-
-           if message.content_type == "image_url" || message.content[0..4] == "data:"
-             provider_message[:type] = "image_url"
-             provider_message[:image_url] = { url: message.content }
-           end
-           provider_message
-         end
-       end
+       private
+
+       # Now using modules, but we can override build_provider_parameters for OpenAI-specific needs
+       # The prompt_parameters method comes from ParameterBuilder module
+       # The format_tools method comes from ToolManagement module
+       # The provider_messages method comes from MessageFormatting module

        def chat_response(response)
          return @response if prompt.options[:stream]
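
The streaming proc itself now lives in the StreamProcessing module (not shown in this diff); OpenAIProvider only overrides the per-chunk hook. For reference, the chunks the override digs into look roughly like this, with illustrative values following OpenAI's chat-completions streaming format:

content_chunk = {
  "id" => "chatcmpl-abc123",
  "choices" => [ { "delta" => { "content" => "Hello" }, "finish_reason" => nil } ]
}
final_chunk = {
  "id" => "chatcmpl-abc123",
  "choices" => [ { "delta" => {}, "finish_reason" => "stop" } ]
}
# content_chunk appends to message.content and notifies agent_stream;
# final_chunk's finish_reason triggers finalize_stream(message, agent_stream).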
@@ -118,7 +104,7 @@ module ActiveAgent
        end

        def responses_response(response)
-         message_json = response.dig("output", 0)
+         message_json = response["output"].find { |output_item| output_item["type"] == "message" }
          message_json["id"] = response.dig("id") if message_json["id"].blank?

          message = ActiveAgent::ActionPrompt::Message.new(
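
The switch from dig("output", 0) to find is needed because a Responses API payload can lead with non-message output items (for example a reasoning item) before the assistant message. A rough sketch of such a payload, with invented field values:

response = {
  "id" => "resp_123",
  "output" => [
    { "type" => "reasoning", "summary" => [] },
    { "type" => "message", "id" => "msg_456",
      "content" => [ { "type" => "output_text", "text" => "Hello!" } ] }
  ]
}
response["output"].find { |item| item["type"] == "message" }
# => the message item; dig("output", 0) would have picked the reasoning item instead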
@@ -144,20 +130,7 @@ module ActiveAgent
          )
        end

-       def handle_actions(tool_calls)
-         return [] if tool_calls.nil? || tool_calls.empty?
-
-         tool_calls.map do |tool_call|
-           next if tool_call["function"].nil? || tool_call["function"]["name"].blank?
-           args = tool_call["function"]["arguments"].blank? ? nil : JSON.parse(tool_call["function"]["arguments"], { symbolize_names: true })
-
-           ActiveAgent::ActionPrompt::Action.new(
-             id: tool_call["id"],
-             name: tool_call.dig("function", "name"),
-             params: args
-           )
-         end.compact
-       end
+       # handle_actions is now provided by ToolManagement module

        def chat_prompt(parameters: prompt_parameters)
          parameters[:stream] = provider_stream if prompt.options[:stream] || config["stream"]