activeagent 0.5.0 → 0.6.0rc1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,92 @@
+# frozen_string_literal: true
+
+require "active_support/log_subscriber"
+
+module ActiveAgent
+  module GenerationProvider
+    # = Generation Provider \LogSubscriber
+    #
+    # Implements the ActiveSupport::LogSubscriber for logging notifications when
+    # generation providers make API calls and handle responses.
+    class LogSubscriber < ActiveSupport::LogSubscriber
+      # A generation request was made
+      def generate(event)
+        info do
+          provider = event.payload[:provider]
+          model = event.payload[:model]
+
+          if exception = event.payload[:exception_object]
+            "Failed generation with #{provider} model=#{model} error_class=#{exception.class} error_message=#{exception.message.inspect}"
+          else
+            "Generated response with #{provider} model=#{model} (#{event.duration.round(1)}ms)"
+          end
+        end
+
+        debug { event.payload[:prompt] } if event.payload[:prompt]
+      end
+      subscribe_log_level :generate, :debug
+
+      # Streaming chunk received
+      def stream_chunk(event)
+        debug do
+          provider = event.payload[:provider]
+          chunk_size = event.payload[:chunk_size]
+          "#{provider}: received stream chunk (#{chunk_size} bytes)"
+        end
+      end
+      subscribe_log_level :stream_chunk, :debug
+
+      # Tool/function call executed
+      def tool_call(event)
+        info do
+          tool_name = event.payload[:tool_name]
+          tool_id = event.payload[:tool_id]
+
+          if exception = event.payload[:exception_object]
+            "Failed tool call #{tool_name} id=#{tool_id} error=#{exception.class}"
+          else
+            "Executed tool call #{tool_name} id=#{tool_id} (#{event.duration.round(1)}ms)"
+          end
+        end
+      end
+      subscribe_log_level :tool_call, :debug
+
+      # Retry attempt
+      def retry(event)
+        warn do
+          provider = event.payload[:provider]
+          attempt = event.payload[:attempt]
+          max_attempts = event.payload[:max_attempts]
+          error_class = event.payload[:error_class]
+
+          "#{provider}: Retry attempt #{attempt}/#{max_attempts} after #{error_class}"
+        end
+      end
+      subscribe_log_level :retry, :warn
+
+      # Error occurred
+      def error(event)
+        error do
+          provider = event.payload[:provider]
+          error_class = event.payload[:error_class]
+          error_message = event.payload[:error_message]
+
+          "#{provider}: Error #{error_class} - #{error_message}"
+        end
+      end
+      subscribe_log_level :error, :error
+
+      # Use the logger configured for ActiveAgent::Base if available
+      def logger
+        if defined?(ActiveAgent::Base) && ActiveAgent::Base.respond_to?(:logger)
+          ActiveAgent::Base.logger
+        else
+          super
+        end
+      end
+    end
+  end
+end
+
+# Attach to active_agent.generation_provider namespace
+ActiveAgent::GenerationProvider::LogSubscriber.attach_to :"active_agent.generation_provider"
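The subscriber attaches to the `active_agent.generation_provider` namespace, so under the standard ActiveSupport convention it receives events named `<method>.<namespace>`, such as `generate.active_agent.generation_provider`. A minimal sketch of how a provider (or a test) could emit such an event is below; the call site and payload values are illustrative and not taken from this diff, though the payload keys mirror the ones the subscriber reads above.

```ruby
require "active_support/notifications"

# Hypothetical instrumentation point; the provider and model values are made up.
ActiveSupport::Notifications.instrument(
  "generate.active_agent.generation_provider",
  provider: "openai",
  model: "gpt-4o-mini",
  prompt: "Summarize the release notes"
) do
  # ... perform the API call here. If this block raises, ActiveSupport adds
  # :exception and :exception_object to the payload, which the subscriber
  # turns into the "Failed generation ..." log line.
end
```

On success the subscriber would log something like `Generated response with openai model=gpt-4o-mini (12.3ms)`, plus the prompt itself at debug.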
@@ -0,0 +1,107 @@
+# frozen_string_literal: true
+
+module ActiveAgent
+  module GenerationProvider
+    module MessageFormatting
+      extend ActiveSupport::Concern
+
+      def provider_messages(messages)
+        messages.map do |message|
+          format_message(message)
+        end
+      end
+
+      protected
+
+      def format_message(message)
+        base_message = {
+          role: convert_role(message.role),
+          content: format_content(message)
+        }
+
+        add_tool_fields(base_message, message)
+        add_metadata_fields(base_message, message)
+
+        base_message.compact
+      end
+
+      def convert_role(role)
+        # Default role conversion - override in provider for specific mappings
+        role.to_s
+      end
+
+      def format_content(message)
+        # Handle multimodal content
+        case message.content_type
+        when "image_url"
+          format_image_content(message)
+        when "multipart/mixed", "array"
+          format_multimodal_content(message)
+        else
+          message.content
+        end
+      end
+
+      def format_image_content(message)
+        # Default implementation - override in provider
+        message.content
+      end
+
+      def format_multimodal_content(message)
+        # Default implementation for multimodal content
+        if message.content.is_a?(Array)
+          message.content.map do |item|
+            format_content_item(item)
+          end
+        else
+          message.content
+        end
+      end
+
+      def format_content_item(item)
+        # Format individual content items in multimodal messages
+        # Override in provider for specific formatting
+        item
+      end
+
+      def add_tool_fields(base_message, message)
+        # Add tool-specific fields based on role
+        case message.role.to_s
+        when "assistant"
+          if message.action_requested && message.requested_actions.any?
+            base_message[:tool_calls] = format_tool_calls(message.requested_actions)
+          elsif message.raw_actions.present? && message.raw_actions.is_a?(Array)
+            base_message[:tool_calls] = message.raw_actions
+          end
+        when "tool"
+          base_message[:tool_call_id] = message.action_id if message.action_id
+          base_message[:name] = message.action_name if message.action_name
+        end
+      end
+
+      def add_metadata_fields(base_message, message)
+        # Override to add provider-specific metadata
+        # For example: message IDs, timestamps, etc.
+      end
+
+      def format_tool_calls(actions)
+        # Default implementation - override in provider for specific format
+        actions.map do |action|
+          format_single_tool_call(action)
+        end
+      end
+
+      def format_single_tool_call(action)
+        # Default tool call format (OpenAI style)
+        {
+          type: "function",
+          function: {
+            name: action.name,
+            arguments: action.params.is_a?(String) ? action.params : action.params.to_json
+          },
+          id: action.id
+        }
+      end
+    end
+  end
+end
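MessageFormatting centralizes the message-translation pipeline (`provider_messages` → `format_message` → the role/content/tool hooks) and leaves each hook overridable. A hedged sketch of a provider using those hooks follows; the class name and role mapping are invented for illustration, and it assumes the gem and ActiveSupport are already loaded.

```ruby
# Illustrative only: a provider that keeps the default tool-call format but
# remaps roles and image content for its own API. OpenAIProvider, later in
# this diff, overrides format_image_content in exactly this way.
module ActiveAgent
  module GenerationProvider
    class ExampleProvider
      include MessageFormatting

      protected

      # Map ActiveAgent roles onto whatever the upstream API expects.
      def convert_role(role)
        role.to_s == "tool" ? "function" : role.to_s
      end

      # Providers with their own image payload shape override this hook.
      def format_image_content(message)
        [ { type: "input_image", image_url: message.content } ]
      end
    end
  end
end
```

Keeping the hooks protected means `provider_messages(messages)` remains the single public entry point for callers.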
@@ -1,5 +1,5 @@
 begin
-  gem "ruby-openai", "~> 8.1.0"
+  gem "ruby-openai", "~> 8.2.0"
   require "openai"
 rescue LoadError
   raise LoadError, "The 'ruby-openai' gem is required for OpenAIProvider. Please add it to your Gemfile and run `bundle install`."
@@ -9,10 +9,16 @@ require "active_agent/action_prompt/action"
 require_relative "base"
 require_relative "response"
 require_relative "responses_adapter"
+require_relative "stream_processing"
+require_relative "message_formatting"
+require_relative "tool_management"

 module ActiveAgent
   module GenerationProvider
     class OpenAIProvider < Base
+      include StreamProcessing
+      include MessageFormatting
+      include ToolManagement
       def initialize(config)
         super
         @host = config["host"] || nil
@@ -27,84 +33,58 @@ module ActiveAgent
       def generate(prompt)
         @prompt = prompt

-        if @prompt.multimodal? || @prompt.content_type == "multipart/mixed"
-          responses_prompt(parameters: responses_parameters)
-        else
-          chat_prompt(parameters: prompt_parameters)
+        with_error_handling do
+          if @prompt.multimodal? || @prompt.content_type == "multipart/mixed"
+            responses_prompt(parameters: responses_parameters)
+          else
+            chat_prompt(parameters: prompt_parameters)
+          end
         end
-      rescue => e
-        error_message = e.respond_to?(:message) ? e.message : e.to_s
-        raise GenerationProviderError, error_message
       end

       def embed(prompt)
         @prompt = prompt

-        embeddings_prompt(parameters: embeddings_parameters)
-      rescue => e
-        error_message = e.respond_to?(:message) ? e.message : e.to_s
-        raise GenerationProviderError, error_message
+        with_error_handling do
+          embeddings_prompt(parameters: embeddings_parameters)
+        end
       end

-      private
-
-      def provider_stream
-        agent_stream = prompt.options[:stream]
-
-        message = ActiveAgent::ActionPrompt::Message.new(content: "", role: :assistant)
-
-        @response = ActiveAgent::GenerationProvider::Response.new(prompt:, message:)
-        proc do |chunk, bytesize|
-          new_content = chunk.dig("choices", 0, "delta", "content")
-          if new_content && !new_content.blank?
-            message.generation_id = chunk.dig("id")
-            message.content += new_content
-
-            agent_stream.call(message, new_content, false, prompt.action_name) do |message, new_content|
-              yield message, new_content if block_given?
-            end
-          elsif chunk.dig("choices", 0, "delta", "tool_calls") && chunk.dig("choices", 0, "delta", "role")
-            message = handle_message(chunk.dig("choices", 0, "delta"))
-            prompt.messages << message
-            @response = ActiveAgent::GenerationProvider::Response.new(prompt:, message:)
-          end
+      protected
+
+      # Override from StreamProcessing module
+      def process_stream_chunk(chunk, message, agent_stream)
+        new_content = chunk.dig("choices", 0, "delta", "content")
+        if new_content && !new_content.blank?
+          message.generation_id = chunk.dig("id")
+          message.content += new_content
+          # Call agent_stream directly without the block to avoid double execution
+          agent_stream&.call(message, new_content, false, prompt.action_name)
+        elsif chunk.dig("choices", 0, "delta", "tool_calls") && chunk.dig("choices", 0, "delta", "role")
+          message = handle_message(chunk.dig("choices", 0, "delta"))
+          prompt.messages << message
+          @response = ActiveAgent::GenerationProvider::Response.new(prompt:, message:)
+        end

-          agent_stream.call(message, nil, true, prompt.action_name) do |message|
-            yield message, nil if block_given?
-          end
+        if chunk.dig("choices", 0, "finish_reason")
+          finalize_stream(message, agent_stream)
         end
       end

-      def prompt_parameters(model: @prompt.options[:model] || @model_name, messages: @prompt.messages, temperature: @prompt.options[:temperature] || @config["temperature"] || 0.7, tools: @prompt.actions)
-        {
-          model: model,
-          messages: provider_messages(messages),
-          temperature: temperature,
-          max_tokens: @prompt.options[:max_tokens] || @config["max_tokens"],
-          tools: tools.presence
-        }.compact
+      # Override from MessageFormatting module to handle OpenAI image format
+      def format_image_content(message)
+        [ {
+          type: "image_url",
+          image_url: { url: message.content }
+        } ]
       end

-      def provider_messages(messages)
-        messages.map do |message|
-          provider_message = {
-            role: message.role,
-            tool_call_id: message.action_id.presence,
-            name: message.action_name.presence,
-            tool_calls: message.raw_actions.present? ? message.raw_actions[:tool_calls] : (message.requested_actions.map { |action| { type: "function", name: action.name, arguments: action.params.to_json } } if message.action_requested),
-            generation_id: message.generation_id,
-            content: message.content,
-            type: message.content_type,
-            charset: message.charset
-          }.compact
-
-          if message.content_type == "image_url" || message.content[0..4] == "data:"
-            provider_message[:type] = "image_url"
-            provider_message[:image_url] = { url: message.content }
-          end
-          provider_message
-        end
-      end
+      private
+
+      # Now using modules, but we can override build_provider_parameters for OpenAI-specific needs
+      # The prompt_parameters method comes from ParameterBuilder module
+      # The format_tools method comes from ToolManagement module
+      # The provider_messages method comes from MessageFormatting module

       def chat_response(response)
         return @response if prompt.options[:stream]
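Both `generate` and `embed` now delegate their rescue logic to `with_error_handling`, whose definition is not included in this diff (it presumably lives alongside the other new concerns). Reconstructed from the rescue clauses it replaces, a plausible shape would be:

```ruby
# Assumed helper, not shown in this diff; inferred from the removed rescue
# blocks above. The real implementation may also emit the error and retry
# events that the new LogSubscriber consumes.
def with_error_handling
  yield
rescue => e
  error_message = e.respond_to?(:message) ? e.message : e.to_s
  raise GenerationProviderError, error_message
end
```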
@@ -118,7 +98,7 @@ module ActiveAgent
       end

       def responses_response(response)
-        message_json = response.dig("output", 0)
+        message_json = response["output"].find { |output_item| output_item["type"] == "message" }
         message_json["id"] = response.dig("id") if message_json["id"].blank?

         message = ActiveAgent::ActionPrompt::Message.new(
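The `responses_response` change guards against the Responses API returning non-message items (tool calls, reasoning entries, and similar) ahead of the assistant message in its `output` array, where `response.dig("output", 0)` would pick up the wrong element. An abridged, made-up payload illustrates the difference:

```ruby
# Abridged illustration, not a captured API response.
response = {
  "id" => "resp_123",
  "output" => [
    { "type" => "function_call", "name" => "lookup_weather", "call_id" => "call_1" },
    { "type" => "message", "id" => "msg_1",
      "content" => [ { "type" => "output_text", "text" => "It is sunny." } ] }
  ]
}

response.dig("output", 0)["type"]
# => "function_call"  -- old behaviour: not the assistant message

response["output"].find { |item| item["type"] == "message" }["id"]
# => "msg_1"          -- new behaviour: the first message item
```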
@@ -144,20 +124,7 @@ module ActiveAgent
         )
       end

-      def handle_actions(tool_calls)
-        return [] if tool_calls.nil? || tool_calls.empty?
-
-        tool_calls.map do |tool_call|
-          next if tool_call["function"].nil? || tool_call["function"]["name"].blank?
-          args = tool_call["function"]["arguments"].blank? ? nil : JSON.parse(tool_call["function"]["arguments"], { symbolize_names: true })
-
-          ActiveAgent::ActionPrompt::Action.new(
-            id: tool_call["id"],
-            name: tool_call.dig("function", "name"),
-            params: args
-          )
-        end.compact
-      end
+      # handle_actions is now provided by ToolManagement module

       def chat_prompt(parameters: prompt_parameters)
         parameters[:stream] = provider_stream if prompt.options[:stream] || config["stream"]