aia 0.9.19 → 0.9.20
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.version +1 -1
- data/CHANGELOG.md +151 -91
- data/README.md +128 -3
- data/docs/cli-reference.md +71 -4
- data/docs/guides/models.md +196 -1
- data/lib/aia/config/base.rb +6 -1
- data/lib/aia/config/cli_parser.rb +116 -2
- data/lib/aia/config/file_loader.rb +33 -1
- data/lib/aia/prompt_handler.rb +22 -1
- data/lib/aia/ruby_llm_adapter.rb +134 -30
- data/lib/aia/session.rb +24 -8
- data/lib/aia/utility.rb +19 -1
- metadata +1 -1
data/lib/aia/prompt_handler.rb
CHANGED
@@ -101,7 +101,7 @@ module AIA
     def fetch_role(role_id)
       # Handle nil role_id
       return handle_missing_role("roles/") if role_id.nil?
-
+
       # Prepend roles_prefix if not already present
       unless role_id.start_with?(AIA.config.roles_prefix)
         role_id = "#{AIA.config.roles_prefix}/#{role_id}"
@@ -126,6 +126,27 @@ module AIA
       handle_missing_role(role_id)
     end
 
+    # Load role for a specific model (ADR-005)
+    # Takes a model spec hash and default role, returns role text
+    def load_role_for_model(model_spec, default_role = nil)
+      # Determine which role to use
+      role_id = if model_spec.is_a?(Hash)
+                  model_spec[:role] || default_role
+                else
+                  # Backward compatibility: if model_spec is a string, use default role
+                  default_role
+                end
+
+      return nil if role_id.nil? || role_id.empty?
+
+      # Load the role using existing fetch_role method
+      role_prompt = fetch_role(role_id)
+      role_prompt.text
+    rescue => e
+      puts "Warning: Could not load role '#{role_id}' for model: #{e.message}"
+      nil
+    end
+
     def handle_missing_role(role_id)
       # Handle empty/nil role_id
       role_id = role_id.to_s.strip
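
Note: load_role_for_model resolves a role in a fixed order: the spec's own :role, then the session-wide default, then none. A minimal standalone sketch of just that ordering (inputs are illustrative, not taken from the gem):

    # spec[:role] wins over the default; a bare string spec falls back to the default.
    def resolve_role_id(model_spec, default_role = nil)
      role_id = model_spec.is_a?(Hash) ? (model_spec[:role] || default_role) : default_role
      role_id.nil? || role_id.empty? ? nil : role_id
    end

    resolve_role_id({model: 'gpt-4o', role: 'pirate'}, 'teacher')  #=> "pirate"
    resolve_role_id({model: 'gpt-4o', role: nil}, 'teacher')       #=> "teacher"
    resolve_role_id('gpt-4o')                                      #=> nil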
data/lib/aia/ruby_llm_adapter.rb
CHANGED
@@ -5,10 +5,11 @@ require_relative '../extensions/ruby_llm/provider_fix'
 
 module AIA
   class RubyLLMAdapter
-    attr_reader :tools
+    attr_reader :tools, :model_specs
 
     def initialize
-      @models = extract_models_config
+      @model_specs = extract_models_config # Full specs with role info
+      @models = extract_model_names(@model_specs) # Just model names for backward compat
       @chats = {}
       @contexts = {} # Store isolated contexts for each model
 
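
Read together with extract_models_config further down in this file, each element of @model_specs is a hash of this shape (values here are examples, not from the diff):

    spec = {
      model:       'gpt-4o',     # name sent to the provider
      role:        'architect',  # optional per-model role, nil when unset
      instance:    2,            # 1-based counter for repeated models
      internal_id: 'gpt-4o#2'    # unique storage key; equals the model name for instance 1
    }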
@@ -115,9 +116,13 @@ module AIA
     def setup_chats_with_tools
       valid_chats = {}
       valid_contexts = {}
+      valid_specs = []
       failed_models = []
 
-      @models.each do |model_name|
+      @model_specs.each do |spec|
+        model_name = spec[:model] # Actual model name (e.g., "gpt-4o")
+        internal_id = spec[:internal_id] # Key for storage (e.g., "gpt-4o#1", "gpt-4o#2")
+
         begin
           # Create isolated context for this model to prevent cross-talk (ADR-002)
           context = create_isolated_context_for_model(model_name)
@@ -138,10 +143,11 @@ module AIA
                      context.chat(model: actual_model)
                    end
 
-          valid_chats[model_name] = chat
-          valid_contexts[model_name] = context
+          valid_chats[internal_id] = chat
+          valid_contexts[internal_id] = context
+          valid_specs << spec
         rescue StandardError => e
-          failed_models << "#{model_name}: #{e.message}"
+          failed_models << "#{internal_id}: #{e.message}"
         end
       end
 
@@ -160,10 +166,11 @@ module AIA
 
       @chats = valid_chats
       @contexts = valid_contexts
+      @model_specs = valid_specs
       @models = valid_chats.keys
 
-      # Update the config to reflect only the valid models
-      AIA.config.model = @models
+      # Update the config to reflect only the valid models (keep as specs)
+      AIA.config.model = @model_specs
 
       # Report successful models
       if failed_models.any?
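
Keying valid_chats and valid_contexts by internal_id rather than the bare model name is what lets the same model be listed twice (for example with two different roles). A toy illustration of the collision that internal_id avoids (specs made up):

    specs = [
      {model: 'gpt-4o', internal_id: 'gpt-4o'},
      {model: 'gpt-4o', internal_id: 'gpt-4o#2'}
    ]
    specs.map { |s| [s[:model], s] }.to_h.size        #=> 1 (second entry overwrites the first)
    specs.map { |s| [s[:internal_id], s] }.to_h.size  #=> 2 (both instances kept)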
@@ -279,29 +286,71 @@ module AIA
       result
     end
 
-    def single_model_chat(prompt, model_name)
-      chat_instance = @chats[model_name]
+    def single_model_chat(prompt, internal_id)
+      chat_instance = @chats[internal_id]
       modes = chat_instance.model.modalities
 
       # TODO: Need to consider how to handle multi-mode models
       result = if modes.text_to_text?
-                 text_to_text_single(prompt, model_name)
+                 text_to_text_single(prompt, internal_id)
                elsif modes.image_to_text?
-                 image_to_text_single(prompt, model_name)
+                 image_to_text_single(prompt, internal_id)
               elsif modes.text_to_image?
-                 text_to_image_single(prompt, model_name)
+                 text_to_image_single(prompt, internal_id)
               elsif modes.text_to_audio?
-                 text_to_audio_single(prompt, model_name)
+                 text_to_audio_single(prompt, internal_id)
               elsif modes.audio_to_text?
-                 audio_to_text_single(prompt, model_name)
+                 audio_to_text_single(prompt, internal_id)
               else
                 # TODO: what else can be done?
-                "Error: No matching modality for model #{model_name}"
+                "Error: No matching modality for model #{internal_id}"
               end
 
       result
     end
 
+    # Prepend role content to prompt for a specific model (ADR-005)
+    def prepend_model_role(prompt, internal_id)
+      # Get model spec to find role
+      spec = get_model_spec(internal_id)
+      return prompt unless spec && spec[:role]
+
+      # Get role content using PromptHandler
+      # Need to create PromptHandler instance if not already available
+      prompt_handler = AIA::PromptHandler.new
+      role_content = prompt_handler.load_role_for_model(spec, AIA.config.role)
+
+      return prompt unless role_content
+
+      # Prepend role to prompt based on prompt type
+      if prompt.is_a?(String)
+        # Simple string prompt
+        "#{role_content}\n\n#{prompt}"
+      elsif prompt.is_a?(Array)
+        # Conversation array - prepend to first user message
+        prepend_role_to_conversation(prompt, role_content)
+      else
+        prompt
+      end
+    end
+
+    def prepend_role_to_conversation(conversation, role_content)
+      # Find the first user message and prepend role
+      modified = conversation.dup
+      first_user_index = modified.find_index { |msg| msg[:role] == "user" || msg["role"] == "user" }
+
+      if first_user_index
+        msg = modified[first_user_index].dup
+        role_key = msg.key?(:role) ? :role : "role"
+        content_key = msg.key?(:content) ? :content : "content"
+
+        msg[content_key] = "#{role_content}\n\n#{msg[content_key]}"
+        modified[first_user_index] = msg
+      end
+
+      modified
+    end
+
     def multi_model_chat(prompt_or_contexts)
       results = {}
 
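
prepend_role_to_conversation rewrites only the first user message and tolerates both symbol and string keys. Expected behavior on a made-up conversation:

    conversation = [
      {role: 'system', content: 'Be brief.'},
      {role: 'user',   content: 'What is 2 + 2?'}
    ]
    # prepend_role_to_conversation(conversation, 'You are a pirate.') yields:
    # [{role: 'system', content: 'Be brief.'},
    #  {role: 'user',   content: "You are a pirate.\n\nWhat is 2 + 2?"}]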
@@ -310,20 +359,23 @@ module AIA
         prompt_or_contexts.keys.all? { |k| @models.include?(k) }
 
       Async do |task|
-        @models.each do |model_name|
+        @models.each do |internal_id|
           task.async do
             begin
               # Use model-specific context if available, otherwise shared prompt
               prompt = if per_model_contexts
-                         prompt_or_contexts[model_name]
+                         prompt_or_contexts[internal_id]
                        else
                          prompt_or_contexts
                        end
 
-              result = single_model_chat(prompt, model_name)
-              results[model_name] = result
+              # Add per-model role if specified (ADR-005)
+              prompt = prepend_model_role(prompt, internal_id)
+
+              result = single_model_chat(prompt, internal_id)
+              results[internal_id] = result
             rescue StandardError => e
-              results[model_name] = "Error with #{model_name}: #{e.message}"
+              results[internal_id] = "Error with #{internal_id}: #{e.message}"
             end
           end
         end
@@ -355,14 +407,17 @@ module AIA
       primary_chat = @chats[primary_model]
 
       # Build the consensus prompt with all model responses
+      # Note: This prompt does NOT include the model's role (ADR-005)
+      # The primary model synthesizes neutrally without role bias
       consensus_prompt = build_consensus_prompt(results)
 
       begin
         # Have the primary model generate the consensus
+        # The consensus prompt is already role-neutral
         consensus_result = primary_chat.ask(consensus_prompt).content
 
-        # Format the consensus response
-        "from: #{primary_model}
+        # Format the consensus response - no role label for consensus
+        "from: #{primary_model}\n#{consensus_result}"
       rescue StandardError => e
         # If consensus fails, fall back to individual responses
         "Error generating consensus: #{e.message}\n\n" + format_individual_responses(results)
@@ -406,10 +461,14 @@ module AIA
         # Return structured data that preserves metrics for multi-model
         format_multi_model_with_metrics(results)
       else
-        # Original string formatting for non-metrics mode
+        # Original string formatting for non-metrics mode with role labels (ADR-005)
         output = []
-        results.each do |model_name, result|
-          output << "from: #{model_name}"
+        results.each do |internal_id, result|
+          # Get model spec to include role in output
+          spec = get_model_spec(internal_id)
+          display_name = format_model_display_name(spec)
+
+          output << "from: #{display_name}"
           # Extract content from RubyLLM::Message if needed
           content = if result.respond_to?(:content)
                       result.content
@@ -423,6 +482,27 @@ module AIA
       end
     end
 
+    # Format display name with instance number and role (ADR-005)
+    def format_model_display_name(spec)
+      return spec unless spec.is_a?(Hash)
+
+      model_name = spec[:model]
+      instance = spec[:instance]
+      role = spec[:role]
+
+      # Add instance number if > 1
+      display = if instance > 1
+                  "#{model_name} ##{instance}"
+                else
+                  model_name
+                end
+
+      # Add role label if present
+      display += " (#{role})" if role
+
+      display
+    end
+
     def format_multi_model_with_metrics(results)
       # Create a composite response that includes all model responses and metrics
       formatted_content = []
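
Tracing the branches of format_model_display_name gives these display strings (inputs illustrative):

    format_model_display_name({model: 'gpt-4o', instance: 1, role: nil})      #=> "gpt-4o"
    format_model_display_name({model: 'gpt-4o', instance: 2, role: nil})      #=> "gpt-4o #2"
    format_model_display_name({model: 'gpt-4o', instance: 1, role: 'critic'}) #=> "gpt-4o (critic)"
    format_model_display_name({model: 'gpt-4o', instance: 2, role: 'critic'}) #=> "gpt-4o #2 (critic)"
    format_model_display_name('gpt-4o')                                       #=> "gpt-4o" (non-hash passthrough)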
@@ -610,16 +690,40 @@ module AIA
     def extract_models_config
       models_config = AIA.config.model
 
-      # Handle backward compatibility 
+      # Handle backward compatibility
       if models_config.is_a?(String)
-        [models_config]
+        # Old format: single string
+        [{model: models_config, role: nil, instance: 1, internal_id: models_config}]
       elsif models_config.is_a?(Array)
-        models_config
+        if models_config.empty?
+          # Empty array - use default
+          [{model: 'gpt-4o-mini', role: nil, instance: 1, internal_id: 'gpt-4o-mini'}]
+        elsif models_config.first.is_a?(Hash)
+          # New format: array of hashes with model specs
+          models_config
+        else
+          # Old format: array of strings
+          models_config.map { |m| {model: m, role: nil, instance: 1, internal_id: m} }
+        end
       else
-        ['gpt-4o-mini']
+        # Fallback to default
+        [{model: 'gpt-4o-mini', role: nil, instance: 1, internal_id: 'gpt-4o-mini'}]
+      end
+    end
+
+    def extract_model_names(model_specs)
+      # Extract just the model names from the specs
+      # For models with instance > 1, use internal_id (e.g., "gpt-4o#2")
+      model_specs.map do |spec|
+        spec[:internal_id]
       end
     end
 
+    def get_model_spec(internal_id)
+      # Find the spec for a given internal_id
+      @model_specs.find { |spec| spec[:internal_id] == internal_id }
+    end
+
 
     def extract_text_prompt(prompt)
       if prompt.is_a?(String)
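
extract_models_config folds every accepted input shape into the same array-of-hashes form. A condensed standalone sketch of the same case analysis (normalize is a hypothetical free function, not part of the gem):

    def normalize(models_config)
      to_spec = ->(m) { {model: m, role: nil, instance: 1, internal_id: m} }
      case models_config
      when String then [to_spec.call(models_config)]
      when Array
        if models_config.empty?               then [to_spec.call('gpt-4o-mini')]
        elsif models_config.first.is_a?(Hash) then models_config
        else models_config.map(&to_spec)
        end
      else [to_spec.call('gpt-4o-mini')]
      end
    end

    normalize('gpt-4o')                            #=> [{model: 'gpt-4o', role: nil, instance: 1, internal_id: 'gpt-4o'}]
    normalize(%w[a b]).map { |s| s[:internal_id] } #=> ["a", "b"]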
data/lib/aia/session.rb
CHANGED
@@ -45,12 +45,19 @@ module AIA
     end
 
     def initialize_components
-      # For multi-model: create separate context manager per model (ADR-002 revised)
+      # For multi-model: create separate context manager per model (ADR-002 revised + ADR-005)
       # For single-model: maintain backward compatibility with single context manager
       if AIA.config.model.is_a?(Array) && AIA.config.model.size > 1
         @context_managers = {}
-        AIA.config.model.each do |model_name|
-          @context_managers[model_name] = ContextManager.new(
+        AIA.config.model.each do |model_spec|
+          # Handle both old string format and new hash format (ADR-005)
+          internal_id = if model_spec.is_a?(Hash)
+                          model_spec[:internal_id]
+                        else
+                          model_spec
+                        end
+
+          @context_managers[internal_id] = ContextManager.new(
             system_prompt: AIA.config.system_prompt
           )
         end
@@ -528,9 +535,9 @@ module AIA
       "I executed this directive: #{follow_up_prompt}\nHere's the output: #{directive_output}\nLet's continue our conversation."
     end
 
-    # Parse multi-model response into per-model responses (ADR-002 revised)
-    # Input: "from: lms/model\nHabari!\n\nfrom: ollama/model\nKaixo!"
-    # Output: {"lms/model" => "Habari!", "ollama/model" => "Kaixo!"}
+    # Parse multi-model response into per-model responses (ADR-002 revised + ADR-005)
+    # Input: "from: lms/model #2 (role)\nHabari!\n\nfrom: ollama/model\nKaixo!"
+    # Output: {"lms/model#2" => "Habari!", "ollama/model" => "Kaixo!"}
     def parse_multi_model_response(combined_response)
       return {} if combined_response.nil? || combined_response.empty?
 
@@ -545,8 +552,17 @@ module AIA
             responses[current_model] = current_content.join.strip
           end
 
-          # Extract model name
-          current_model = $1.strip
+          # Extract internal_id from display name (ADR-005)
+          # Display format: "model_name #N (role)" or "model_name (role)" or "model_name #N" or "model_name"
+          display_name = $1.strip
+
+          # Remove role part: " (role_name)"
+          internal_id = display_name.sub(/\s+\([^)]+\)\s*$/, '')
+
+          # Remove space before instance number: "model #2" -> "model#2"
+          internal_id = internal_id.sub(/\s+#/, '#')
+
+          current_model = internal_id
          current_content = []
         elsif current_model
           current_content << line
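
The two sub calls invert format_model_display_name: first strip a trailing role label, then rejoin the instance marker to the name. For a made-up header line:

    display_name = 'gpt-4o #2 (critic)'
    internal_id  = display_name.sub(/\s+\([^)]+\)\s*$/, '').sub(/\s+#/, '#')
    #=> "gpt-4o#2"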
data/lib/aia/utility.rb
CHANGED
@@ -28,11 +28,29 @@ module AIA
 
     mcp_version = defined?(RubyLLM::MCP::VERSION) ? " MCP v" + RubyLLM::MCP::VERSION : ''
 
+    # Extract model names from config (handles hash format from ADR-005)
+    model_display = if AIA.config&.model
+                      models = AIA.config.model
+                      if models.is_a?(String)
+                        models
+                      elsif models.is_a?(Array)
+                        if models.first.is_a?(Hash)
+                          models.map { |spec| spec[:model] }.join(', ')
+                        else
+                          models.join(', ')
+                        end
+                      else
+                        models.to_s
+                      end
+                    else
+                      'unknown-model'
+                    end
+
     puts <<-ROBOT
 
        , ,
       (\\____/)  AI Assistant (v#{AIA::VERSION}) is Online
-       (_oo_)    #{AIA.config&.model || 'unknown-model'}#{supports_tools? ? ' (supports tools)' : ''}
+       (_oo_)    #{model_display}#{supports_tools? ? ' (supports tools)' : ''}
        (O)      using #{AIA.config&.adapter || 'unknown-adapter'} (v#{RubyLLM::VERSION}#{mcp_version})
      __||__    \\)  model db was last refreshed on
    [/______\\]  /   #{AIA.config&.last_refresh || 'unknown'}