activeagent 0.1.1 → 0.2.6.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/active_agent/action_prompt/README.md +92 -25
- data/lib/active_agent/action_prompt/action.rb +13 -0
- data/lib/active_agent/action_prompt/base.rb +127 -0
- data/lib/active_agent/action_prompt/message.rb +18 -16
- data/lib/active_agent/action_prompt/prompt.rb +14 -15
- data/lib/active_agent/base.rb +96 -58
- data/lib/active_agent/callbacks.rb +13 -0
- data/lib/active_agent/generation.rb +3 -3
- data/lib/active_agent/generation_job.rb +1 -1
- data/lib/active_agent/generation_provider/README.md +63 -8
- data/lib/active_agent/generation_provider/anthropic_provider.rb +142 -0
- data/lib/active_agent/generation_provider/base.rb +7 -2
- data/lib/active_agent/generation_provider/open_ai_provider.rb +95 -24
- data/lib/active_agent/generation_provider.rb +1 -2
- data/lib/active_agent/operation.rb +3 -3
- data/lib/active_agent/queued_generation.rb +1 -1
- data/lib/active_agent/railtie.rb +9 -11
- data/lib/active_agent/service.rb +1 -1
- data/lib/active_agent/version.rb +1 -1
- data/lib/active_agent.rb +7 -3
- data/lib/activeagent.rb +1 -0
- data/lib/generators/active_agent/agent_generator.rb +22 -22
- data/lib/generators/active_agent/install_generator.rb +21 -0
- data/lib/generators/active_agent/templates/active_agent.yml +6 -0
- data/lib/generators/active_agent/templates/agent.rb.tt +1 -1
- data/lib/generators/active_agent/templates/agent.text.erb +1 -0
- data/lib/generators/active_agent/templates/application_agent.rb.tt +7 -0
- metadata +65 -20
- data/README.md +0 -153
- data/Rakefile +0 -3
data/lib/active_agent/base.rb
CHANGED
@@ -67,11 +67,7 @@ module ActiveAgent
   }.freeze
 
   class << self
-    def prompt(...)
-      new.prompt(...)
-    end
-
-    # Register one or more Observers which will be notified when mail is delivered.
+    # Register one or more Observers which will be notified when prompt is generated.
     def register_observers(*observers)
       observers.flatten.compact.each { |observer| register_observer(observer) }
     end
@@ -81,7 +77,7 @@ module ActiveAgent
       observers.flatten.compact.each { |observer| unregister_observer(observer) }
     end
 
-    # Register one or more Interceptors which will be called before
+    # Register one or more Interceptors which will be called before prompt is sent.
     def register_interceptors(*interceptors)
       interceptors.flatten.compact.each { |interceptor| register_interceptor(interceptor) }
     end
@@ -91,32 +87,32 @@ module ActiveAgent
       interceptors.flatten.compact.each { |interceptor| unregister_interceptor(interceptor) }
     end
 
-    # Register an Observer which will be notified when
+    # Register an Observer which will be notified when prompt is generated.
     # Either a class, string, or symbol can be passed in as the Observer.
     # If a string or symbol is passed in it will be camelized and constantized.
     def register_observer(observer)
-
+      Prompt.register_observer(observer_class_for(observer))
     end
 
     # Unregister a previously registered Observer.
     # Either a class, string, or symbol can be passed in as the Observer.
     # If a string or symbol is passed in it will be camelized and constantized.
     def unregister_observer(observer)
-
+      Prompt.unregister_observer(observer_class_for(observer))
     end
 
-    # Register an Interceptor which will be called before
+    # Register an Interceptor which will be called before prompt is sent.
     # Either a class, string, or symbol can be passed in as the Interceptor.
     # If a string or symbol is passed in it will be camelized and constantized.
     def register_interceptor(interceptor)
-
+      Prompt.register_interceptor(observer_class_for(interceptor))
     end
 
     # Unregister a previously registered Interceptor.
     # Either a class, string, or symbol can be passed in as the Interceptor.
     # If a string or symbol is passed in it will be camelized and constantized.
     def unregister_interceptor(interceptor)
-
+      Prompt.unregister_interceptor(observer_class_for(interceptor))
    end
 
    def observer_class_for(value) # :nodoc:
@@ -133,6 +129,8 @@ module ActiveAgent
     def generate_with(provider, **options)
       self.generation_provider = provider
       self.options = (options || {}).merge(options)
+      self.options[:stream] = new.agent_stream if self.options[:stream]
+      generation_provider.config.merge!(self.options)
     end
 
     def stream_with(&stream)
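For orientation, a minimal sketch of how this option wiring might be used from an agent class; the agent name, action, provider symbol, and `model:` value are illustrative and not taken from this diff:

```ruby
# Illustrative sketch only. `stream: true` is intercepted by generate_with
# above, replaced with the agent_stream proc, and merged into the provider
# config alongside the other options.
class SupportAgent < ApplicationAgent
  generate_with :openai, model: "gpt-4o", stream: true

  def answer
    prompt
  end
end
```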
@@ -179,13 +177,8 @@ module ActiveAgent
 
     def set_payload_for_prompt(payload, prompt)
       payload[:prompt] = prompt.encoded
-      payload[:agent] =
+      payload[:agent] = agent_name
       payload[:message_id] = prompt.message_id
-      payload[:subject] = prompt.subject
-      payload[:to] = prompt.to
-      payload[:from] = prompt.from
-      payload[:bcc] = prompt.bcc if prompt.bcc.present?
-      payload[:cc] = prompt.cc if prompt.cc.present?
       payload[:date] = prompt.date
       payload[:perform_generations] = prompt.perform_generations
     end
@@ -203,17 +196,56 @@ module ActiveAgent
       end
     end
 
-    attr_internal :
+    attr_internal :prompt_context
+
+    def agent_stream
+      proc do |message, delta, stop|
+        run_stream_callbacks(message, delta, stop) do |message, delta, stop|
+          yield message, delta, stop if block_given?
+        end
+      end
+    end
+
+    def embed
+      prompt_context.options.merge(options)
+      generation_provider.embed(prompt_context) if prompt_context && generation_provider
+      handle_response(generation_provider.response)
+    end
 
     def perform_generation
-
-      generation_provider.generate(
+      prompt_context.options.merge(options)
+      generation_provider.generate(prompt_context) if prompt_context && generation_provider
+      handle_response(generation_provider.response)
+    end
+
+    def handle_response(response)
+      perform_actions(requested_actions: response.message.requested_actions) if response.message.requested_actions.present?
+
+      update_prompt_context(response)
+    end
+
+    def update_prompt_context(response)
+      # response.prompt = prompt_context
+      # response.message = response.messages.last
+      response
+    end
+
+    def perform_actions(requested_actions:)
+      requested_actions.each do |action|
+        perform_action(action)
+      end
+    end
+
+    def perform_action(action)
+      process(action.name, *action.params)
+      prompt_context.messages.last.role = :tool
+      prompt_context.messages.last.action_id = action.id
     end
 
     def initialize
       super
       @_prompt_was_called = false
-      @
+      @_prompt_context = ActiveAgent::ActionPrompt::Prompt.new(instructions: options[:instructions], options: options)
     end
 
     def process(method_name, *args) # :nodoc:
|
|
225
257
|
|
226
258
|
ActiveSupport::Notifications.instrument("process.active_agent", payload) do
|
227
259
|
super
|
228
|
-
@
|
260
|
+
@_prompt_context = ActiveAgent::ActionPrompt::Prompt.new unless @_prompt_was_called
|
229
261
|
end
|
230
262
|
end
|
231
263
|
ruby2_keywords(:process)
|
@@ -255,20 +287,12 @@ module ActiveAgent
 
     def headers(args = nil)
       if args
-        @
+        @_prompt_context.headers(args)
       else
-        @
+        @_prompt_context
       end
     end
 
-    # def attachments
-    #   if @_prompt_was_called
-    #     LateAttachmentsProxy.new(@_context.attachments)
-    #   else
-    #     @_context.attachments
-    #   end
-    # end
-
     class LateAttachmentsProxy < SimpleDelegator
       def inline
         self
@@ -286,31 +310,39 @@ module ActiveAgent
       end
     end
 
+    def prompt_with(*)
+      prompt_context.update_prompt_context(*)
+    end
+
     def prompt(headers = {}, &block)
-      return
+      return prompt_context if @_prompt_was_called && headers.blank? && !block
 
       content_type = headers[:content_type]
 
       headers = apply_defaults(headers)
 
-
+      prompt_context.context_id = headers[:context_id]
+
+      prompt_context.charset = charset = headers[:charset]
 
       responses = collect_responses(headers, &block)
+
       @_prompt_was_called = true
 
-      create_parts_from_responses(
+      create_parts_from_responses(prompt_context, responses)
 
-
-
-
-
-      context
+      prompt_context.content_type = set_content_type(prompt_context, content_type, headers[:content_type])
+      prompt_context.charset = charset
+      prompt_context.actions = headers[:actions] || action_schemas
+      prompt_context
     end
-
+
     def action_schemas
       action_methods.map do |action|
-
-
+        if action != "text_prompt"
+          JSON.parse render_to_string(locals: {action_name: action}, action: action, formats: :json)
+        end
+      end.compact
     end
 
     private
@@ -319,7 +351,7 @@ module ActiveAgent
       if user_content_type.present?
         user_content_type
       else
-
+        prompt_context.content_type || class_default
       end
     end
 
@@ -350,10 +382,10 @@ module ActiveAgent
       end
     end
 
-    def
+    def assign_headers_to_prompt_context(prompt_context, headers)
       assignable = headers.except(:parts_order, :content_type, :body, :template_name,
         :template_path, :delivery_method, :delivery_method_options)
-      assignable.each { |k, v|
+      assignable.each { |k, v| prompt_context[k] = v }
     end
 
     def collect_responses(headers, &)
@@ -385,12 +417,14 @@ module ActiveAgent
       templates_name = headers[:template_name] || action_name
 
       each_template(Array(templates_path), templates_name).map do |template|
+        next if template.format == :json
+
         format = template.format || formats.first
         {
          body: render(template: template, formats: [format]),
          content_type: Mime[format].to_s
         }
-      end
+      end.compact
     end
 
     def each_template(paths, name, &)
@@ -402,20 +436,24 @@ module ActiveAgent
       end
     end
 
-    def create_parts_from_responses(
-      if responses.size > 1
-        prompt_container = ActiveAgent::ActionPrompt::Prompt.new
-        prompt_container.content_type = "multipart/alternative"
-        responses.each { |r| insert_part(
-
+    def create_parts_from_responses(prompt_context, responses)
+      if responses.size > 1
+        # prompt_container = ActiveAgent::ActionPrompt::Prompt.new
+        # prompt_container.content_type = "multipart/alternative"
+        responses.each { |r| insert_part(prompt_context, r, prompt_context.charset) }
+        # prompt_context.add_part(prompt_container)
       else
-        responses.each { |r| insert_part(
+        responses.each { |r| insert_part(prompt_context, r, prompt_context.charset) }
       end
     end
 
-    def insert_part(
-
-
+    def insert_part(prompt_context, response, charset)
+      message = ActiveAgent::ActionPrompt::Message.new(
+        content: response[:body],
+        content_type: response[:content_type],
+        charset: charset
+      )
+      prompt_context.add_part(message)
     end
 
     # This and #instrument_name is for caching instrument
data/lib/active_agent/callbacks.rb
CHANGED
@@ -7,6 +7,7 @@ module ActiveAgent
     included do
       include ActiveSupport::Callbacks
       define_callbacks :generate, skip_after_callbacks_if_terminated: true
+      define_callbacks :stream, skip_after_callbacks_if_terminated: true
     end
 
     module ClassMethods
@@ -26,6 +27,18 @@ module ActiveAgent
       def around_generate(*filters, &)
         set_callback(:generate, :around, *filters, &)
       end
+
+      # Defines a callback for handling streaming responses during generation
+      def on_stream(*filters, &)
+        set_callback(:stream, :before, *filters, &)
+      end
+    end
+
+    # Helper method to run stream callbacks
+    def run_stream_callbacks(message, delta = nil, stop = false)
+      run_callbacks(:stream) do
+        yield(message, delta, stop) if block_given?
+      end
     end
   end
 end
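A short illustrative registration of the new stream callback; the agent class and the callback body are hypothetical, and only `on_stream` and the `:stream` callback chain come from this diff:

```ruby
# Illustrative sketch: on_stream registers a :before callback on the
# :stream chain, which run_stream_callbacks wraps around every streamed chunk.
class ChatAgent < ApplicationAgent
  generate_with :openai, stream: true

  on_stream do
    Rails.logger.debug("streamed chunk received") # placeholder side effect
  end
end
```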
data/lib/active_agent/generation.rb
CHANGED
@@ -11,14 +11,14 @@ module ActiveAgent
     ruby2_keywords(:initialize)
 
     def __getobj__
-      @prompt_context ||= processed_agent.
+      @prompt_context ||= processed_agent.prompt_context
     end
 
     def __setobj__(prompt_context)
       @prompt_context = prompt_context
     end
 
-    def
+    def prompt_context
       __getobj__
     end
 
@@ -37,7 +37,7 @@ module ActiveAgent
     def generate_now!
       processed_agent.handle_exceptions do
         processed_agent.run_callbacks(:generate) do
-          processed_agent
+          processed_agent.perform_generation!
         end
       end
     end
data/lib/active_agent/generation_provider/README.md
CHANGED
@@ -1,17 +1,72 @@
 # Active Agent: Generation Provider
 
-This README provides information about the
+This README provides information about the generation provider interfaces and implementations in the ActiveAgent library.
 
 ## Main Components
 
-Base class -
-
+- Base class - Abstract class for implementing generation providers
+- OpenAI Provider - Reference implementation using OpenAI's API
+- Response class - Standardized response wrapper
+- Module - For including generation provider functionality in agents
 
-
+## Core Concepts
 
-
+### Base Provider Class
 
-
+The `ActiveAgent::GenerationProvider::Base` class defines the core interface that all providers must implement:
 
-
-
+```ruby
+def generate(prompt)
+  raise NotImplementedError
+end
+```
+
+### OpenAI Provider Implementation
+
+The OpenAI provider shows how to implement a concrete generation provider:
+
+- Handles authentication and client setup
+- Implements prompt/completion generation
+- Supports streaming responses
+- Handles embeddings generation
+- Manages context updates
+- Processes tool/action calls
+
+### Provider Features
+
+- Configuration - Providers accept config options for API keys, models, etc
+- Streaming - Optional streaming support for realtime responses
+- Action handling - Support for function/tool calling
+- Error handling - Standardized error handling via GenerationProviderError
+- Context management - Tracks conversation context and message history
+
+### Response Handling
+
+The Response class wraps provider responses with a consistent interface:
+
+```ruby
+Response.new(
+  prompt: prompt,     # Original prompt
+  message: message,   # Generated response
+  raw_response: raw   # Provider-specific response
+)
+```
+
+## Usage Example
+
+```ruby
+# Configure provider
+provider = ActiveAgent::GenerationProvider::OpenAIProvider.new(
+  "api_key" => ENV["OPENAI_API_KEY"],
+  "model" => "gpt-4"
+)
+
+# Generate completion
+response = provider.generate(prompt)
+
+# Access response
+response.message.content # Generated text
+response.raw_response    # Raw provider response
+```
+
+See the OpenAI provider implementation for a complete reference example.
data/lib/active_agent/generation_provider/anthropic_provider.rb
ADDED
@@ -0,0 +1,142 @@
+# lib/active_agent/generation_provider/anthropic_provider.rb
+
+require "anthropic"
+require "active_agent/action_prompt/action"
+require_relative "base"
+require_relative "response"
+
+module ActiveAgent
+  module GenerationProvider
+    class AnthropicProvider < Base
+      def initialize(config)
+        super
+        @api_key = config["api_key"]
+        @model_name = config["model"] || "claude-3-5-sonnet-20240620"
+        @client = Anthropic::Client.new(access_token: @api_key)
+      end
+
+      def generate(prompt)
+        @prompt = prompt
+
+        chat_prompt(parameters: prompt_parameters)
+      rescue => e
+        raise GenerationProviderError, e.message
+      end
+
+      def chat_prompt(parameters: prompt_parameters)
+        parameters[:stream] = provider_stream if prompt.options[:stream] || config["stream"]
+
+        chat_response(@client.messages(parameters))
+      end
+
+      private
+
+      def provider_stream
+        agent_stream = prompt.options[:stream]
+        message = ActiveAgent::ActionPrompt::Message.new(content: "", role: :assistant)
+        @response = ActiveAgent::GenerationProvider::Response.new(prompt: prompt, message:)
+
+        proc do |chunk|
+          if new_content = chunk.dig(:delta, :text)
+            message.content += new_content
+            agent_stream.call(message) if agent_stream.respond_to?(:call)
+          end
+        end
+      end
+
+      def prompt_parameters(model: @prompt.options[:model] || @model_name, messages: @prompt.messages, temperature: @config["temperature"] || 0.7, tools: @prompt.actions)
+        params = {
+          model: model,
+          messages: provider_messages(messages),
+          temperature: temperature,
+          max_tokens: 4096
+        }
+
+        if tools&.present?
+          params[:tools] = format_tools(tools)
+        end
+
+        params
+      end
+
+      def format_tools(tools)
+        tools.map do |tool|
+          {
+            name: tool[:name] || tool[:function][:name],
+            description: tool[:description],
+            input_schema: tool[:parameters]
+          }
+        end
+      end
+
+      def provider_messages(messages)
+        messages.map do |message|
+          provider_message = {
+            role: convert_role(message.role),
+            content: []
+          }
+
+          provider_message[:content] << if message.content_type == "image_url"
+            {
+              type: "image",
+              source: {
+                type: "url",
+                url: message.content
+              }
+            }
+          else
+            {
+              type: "text",
+              text: message.content
+            }
+          end
+
+          provider_message
+        end
+      end
+
+      def convert_role(role)
+        case role.to_s
+        when "system" then "system"
+        when "user" then "user"
+        when "assistant" then "assistant"
+        when "tool", "function" then "assistant"
+        else "user"
+        end
+      end
+
+      def chat_response(response)
+        return @response if prompt.options[:stream]
+
+        content = response.content.first[:text]
+
+        message = ActiveAgent::ActionPrompt::Message.new(
+          content: content,
+          role: "assistant",
+          action_requested: response.stop_reason == "tool_use",
+          requested_actions: handle_actions(response.tool_use)
+        )
+
+        update_context(prompt: prompt, message: message, response: response)
+
+        @response = ActiveAgent::GenerationProvider::Response.new(
+          prompt: prompt,
+          message: message,
+          raw_response: response
+        )
+      end
+
+      def handle_actions(tool_uses)
+        return unless tool_uses&.present?
+
+        tool_uses.map do |tool_use|
+          ActiveAgent::ActionPrompt::Action.new(
+            id: tool_use[:id],
+            name: tool_use[:name],
+            params: tool_use[:input]
+          )
+        end
+      end
+    end
+  end
+end
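The new provider reads "api_key", "model", and "stream" from its config hash and falls back to claude-3-5-sonnet-20240620. A minimal direct-use sketch, mirroring the OpenAI usage example above; the environment variable name is an assumption:

```ruby
# Illustrative only; in an application the generate_with/config layer
# normally constructs the provider for you.
provider = ActiveAgent::GenerationProvider::AnthropicProvider.new(
  "api_key" => ENV["ANTHROPIC_API_KEY"], # assumed env var name
  "model" => "claude-3-5-sonnet-20240620"
)

response = provider.generate(prompt) # prompt is an ActiveAgent::ActionPrompt::Prompt
response.message.content             # generated text
```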
data/lib/active_agent/generation_provider/base.rb
CHANGED
@@ -4,7 +4,7 @@ module ActiveAgent
   module GenerationProvider
     class Base
       class GenerationProviderError < StandardError; end
-      attr_reader :client, :config, :prompt
+      attr_reader :client, :config, :prompt, :response
 
       def initialize(config)
         @config = config
@@ -19,10 +19,15 @@ module ActiveAgent
       private
 
       def handle_response(response)
-        ActiveAgent::GenerationProvider::Response.new(message:, raw_response: response)
+        @response = ActiveAgent::GenerationProvider::Response.new(message:, raw_response: response)
         raise NotImplementedError, "Subclasses must implement the 'handle_response' method"
       end
 
+      def update_context(prompt:, message:, response:)
+        prompt.message = message
+        prompt.messages << message
+      end
+
       protected
 
       def prompt_parameters