aia 0.9.11 → 0.9.13

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (64)
  1. checksums.yaml +4 -4
  2. data/.envrc +2 -0
  3. data/.version +1 -1
  4. data/CHANGELOG.md +66 -2
  5. data/README.md +133 -4
  6. data/docs/advanced-prompting.md +721 -0
  7. data/docs/cli-reference.md +582 -0
  8. data/docs/configuration.md +347 -0
  9. data/docs/contributing.md +332 -0
  10. data/docs/directives-reference.md +490 -0
  11. data/docs/examples/index.md +277 -0
  12. data/docs/examples/mcp/index.md +479 -0
  13. data/docs/examples/prompts/analysis/index.md +78 -0
  14. data/docs/examples/prompts/automation/index.md +108 -0
  15. data/docs/examples/prompts/development/index.md +125 -0
  16. data/docs/examples/prompts/index.md +333 -0
  17. data/docs/examples/prompts/learning/index.md +127 -0
  18. data/docs/examples/prompts/writing/index.md +62 -0
  19. data/docs/examples/tools/index.md +292 -0
  20. data/docs/faq.md +414 -0
  21. data/docs/guides/available-models.md +366 -0
  22. data/docs/guides/basic-usage.md +477 -0
  23. data/docs/guides/chat.md +474 -0
  24. data/docs/guides/executable-prompts.md +417 -0
  25. data/docs/guides/first-prompt.md +454 -0
  26. data/docs/guides/getting-started.md +455 -0
  27. data/docs/guides/image-generation.md +507 -0
  28. data/docs/guides/index.md +46 -0
  29. data/docs/guides/models.md +507 -0
  30. data/docs/guides/tools.md +856 -0
  31. data/docs/index.md +173 -0
  32. data/docs/installation.md +238 -0
  33. data/docs/mcp-integration.md +612 -0
  34. data/docs/prompt_management.md +579 -0
  35. data/docs/security.md +629 -0
  36. data/docs/tools-and-mcp-examples.md +1186 -0
  37. data/docs/workflows-and-pipelines.md +563 -0
  38. data/examples/tools/mcp/github_mcp_server.json +11 -0
  39. data/examples/tools/mcp/imcp.json +7 -0
  40. data/lib/aia/chat_processor_service.rb +38 -7
  41. data/lib/aia/config/base.rb +224 -0
  42. data/lib/aia/config/cli_parser.rb +418 -0
  43. data/lib/aia/config/defaults.rb +88 -0
  44. data/lib/aia/config/file_loader.rb +131 -0
  45. data/lib/aia/config/validator.rb +184 -0
  46. data/lib/aia/config.rb +10 -860
  47. data/lib/aia/directive_processor.rb +27 -372
  48. data/lib/aia/directives/configuration.rb +114 -0
  49. data/lib/aia/directives/execution.rb +37 -0
  50. data/lib/aia/directives/models.rb +178 -0
  51. data/lib/aia/directives/registry.rb +120 -0
  52. data/lib/aia/directives/utility.rb +70 -0
  53. data/lib/aia/directives/web_and_file.rb +71 -0
  54. data/lib/aia/prompt_handler.rb +23 -3
  55. data/lib/aia/ruby_llm_adapter.rb +367 -130
  56. data/lib/aia/session.rb +54 -18
  57. data/lib/aia/ui_presenter.rb +206 -0
  58. data/lib/aia/utility.rb +12 -8
  59. data/lib/aia.rb +11 -2
  60. data/lib/extensions/ruby_llm/.irbrc +56 -0
  61. data/mkdocs.yml +165 -0
  62. metadata +79 -37
  63. data/_notes.txt +0 -231
  64. /data/{images → docs/assets/images}/aia.png +0 -0
@@ -1,39 +1,48 @@
  # lib/aia/ruby_llm_adapter.rb
 
+ require 'async'
+
  module AIA
    class RubyLLMAdapter
      attr_reader :tools
 
      def initialize
-       @provider, @model = extract_model_parts.values
+       @models = extract_models_config
+       @chats = {}
 
        configure_rubyllm
        refresh_local_model_registry
-       setup_chat_with_tools
+       setup_chats_with_tools
      end
 
+
      def configure_rubyllm
        # TODO: Add some of these configuration items to AIA.config
        RubyLLM.configure do |config|
-         config.openai_api_key = ENV.fetch('OPENAI_API_KEY', nil)
-         config.openai_organization_id = ENV.fetch('OPENAI_ORGANIZATION_ID', nil)
-         config.openai_project_id = ENV.fetch('OPENAI_PROJECT_ID', nil)
-
          config.anthropic_api_key = ENV.fetch('ANTHROPIC_API_KEY', nil)
-         config.gemini_api_key = ENV.fetch('GEMINI_API_KEY', nil)
          config.deepseek_api_key = ENV.fetch('DEEPSEEK_API_KEY', nil)
+         config.gemini_api_key = ENV.fetch('GEMINI_API_KEY', nil)
+         config.gpustack_api_key = ENV.fetch('GPUSTACK_API_KEY', nil)
+         config.mistral_api_key = ENV.fetch('MISTRAL_API_KEY', nil)
          config.openrouter_api_key = ENV.fetch('OPENROUTER_API_KEY', nil)
+         config.perplexity_api_key = ENV.fetch('PERPLEXITY_API_KEY', nil)
+
+         # These providers require a little something extra
+         config.openai_api_key = ENV.fetch('OPENAI_API_KEY', nil)
+         config.openai_organization_id = ENV.fetch('OPENAI_ORGANIZATION_ID', nil)
+         config.openai_project_id = ENV.fetch('OPENAI_PROJECT_ID', nil)
 
          config.bedrock_api_key = ENV.fetch('BEDROCK_ACCESS_KEY_ID', nil)
          config.bedrock_secret_key = ENV.fetch('BEDROCK_SECRET_ACCESS_KEY', nil)
          config.bedrock_region = ENV.fetch('BEDROCK_REGION', nil)
          config.bedrock_session_token = ENV.fetch('BEDROCK_SESSION_TOKEN', nil)
 
-         config.ollama_api_base = ENV.fetch('OLLAMA_API_BASE', nil)
+         # Ollama is based upon the OpenAI API so it needs to over-ride a few things
+         config.ollama_api_base = ENV.fetch('OLLAMA_API_BASE', nil)
 
          # --- Custom OpenAI Endpoint ---
          # Use this for Azure OpenAI, proxies, or self-hosted models via OpenAI-compatible APIs.
-         config.openai_api_base = ENV.fetch('OPENAI_API_BASE', nil) # e.g., "https://your-azure.openai.azure.com"
+         config.openai_api_base = ENV.fetch('OPENAI_API_BASE', nil)  # e.g., "https://your-azure.openai.azure.com"
 
          # --- Default Models ---
          # Used by RubyLLM.chat, RubyLLM.embed, RubyLLM.paint if no model is specified.
@@ -42,49 +51,97 @@ module AIA
          # config.default_image_model = 'dall-e-3' # Default: 'dall-e-3'
 
          # --- Connection Settings ---
-         # config.request_timeout = 120 # Request timeout in seconds (default: 120)
-         # config.max_retries = 3 # Max retries on transient network errors (default: 3)
-         # config.retry_interval = 0.1 # Initial delay in seconds (default: 0.1)
-         # config.retry_backoff_factor = 2 # Multiplier for subsequent retries (default: 2)
-         # config.retry_interval_randomness = 0.5 # Jitter factor (default: 0.5)
-
-         # --- Logging Settings ---
+         config.request_timeout = 120 # Request timeout in seconds (default: 120)
+         config.max_retries = 3 # Max retries on transient network errors (default: 3)
+         config.retry_interval = 0.1 # Initial delay in seconds (default: 0.1)
+         config.retry_backoff_factor = 2 # Multiplier for subsequent retries (default: 2)
+         config.retry_interval_randomness = 0.5 # Jitter factor (default: 0.5)
+
+         # Connection pooling settings removed - not supported in current RubyLLM version
+         # config.connection_pool_size = 10 # Number of connections to maintain in pool
+         # config.connection_pool_timeout = 60 # Connection pool timeout in seconds
          # config.log_file = '/logs/ruby_llm.log'
-         config.log_level = :fatal # debug level can also be set to debug by setting RUBYLLM_DEBUG envar to true
+         config.log_level = :fatal  # debug level can also be set to debug by setting RUBYLLM_DEBUG envar to true
        end
      end
 
+
      def refresh_local_model_registry
        if AIA.config.refresh.nil? ||
           Integer(AIA.config.refresh).zero? ||
           Date.today > (AIA.config.last_refresh + Integer(AIA.config.refresh))
          RubyLLM.models.refresh!
          AIA.config.last_refresh = Date.today
-         if AIA.config.config_file
-           AIA::Config.dump_config(AIA.config, AIA.config.config_file)
-         end
+         AIA::Config.dump_config(AIA.config, AIA.config.config_file) if AIA.config.config_file
        end
      end
 
 
-     def setup_chat_with_tools
-       begin
-         @chat = RubyLLM.chat(model: @model)
-         @model = @chat.model.name if @model.nil? # using default model
-       rescue => e
-         STDERR.puts "ERROR: #{e.message}"
+     def setup_chats_with_tools
+       valid_chats = {}
+       failed_models = []
+
+       @models.each do |model_name|
+         begin
+           chat = RubyLLM.chat(model: model_name)
+           valid_chats[model_name] = chat
+         rescue StandardError => e
+           failed_models << "#{model_name}: #{e.message}"
+         end
+       end
+
+       # Report failed models but continue with valid ones
+       unless failed_models.empty?
+         puts "\n❌ Failed to initialize the following models:"
+         failed_models.each { |failure| puts "   - #{failure}" }
+       end
+
+       # If no models initialized successfully, exit
+       if valid_chats.empty?
+         puts "\n❌ No valid models could be initialized. Exiting."
+         puts "\n💡 Available models can be listed with: bin/aia --help models"
          exit 1
        end
 
-       unless @chat.model.supports_functions?
-         AIA.config.tools = []
-         AIA.config.tool_names = ""
-         return
+       @chats = valid_chats
+       @models = valid_chats.keys
+
+       # Update the config to reflect only the valid models
+       AIA.config.model = @models
+
+       # Report successful models
+       if failed_models.any?
+         puts "\n✅ Successfully initialized: #{@models.join(', ')}"
+         puts
        end
 
-       load_tools
+       # Use the first chat to determine tool support (assuming all models have similar tool support)
+       first_chat = @chats.values.first
+       return unless first_chat&.model&.supports_functions?
+
+       load_tools_lazy_mcp_support_only_when_needed
+
+       @chats.each_value do |chat|
+         chat.with_tools(*tools) unless tools.empty?
+       end
+     end
+
 
-       @chat.with_tools(*tools) unless tools.empty?
+     def load_tools_lazy_mcp_support_only_when_needed
+       @tools = []
+
+       support_local_tools
+       support_mcp_lazy
+       filter_tools_by_allowed_list
+       filter_tools_by_rejected_list
+       drop_duplicate_tools
+
+       if tools.empty?
+         AIA.config.tool_names = ''
+       else
+         AIA.config.tool_names = @tools.map(&:name).join(', ')
+         AIA.config.tools = @tools
+       end
      end
 
 
@@ -98,7 +155,7 @@ module AIA
        drop_duplicate_tools
 
        if tools.empty?
-         AIA.config.tool_names = ""
+         AIA.config.tool_names = ''
        else
          AIA.config.tool_names = @tools.map(&:name).join(', ')
          AIA.config.tools = @tools
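
The rewritten setup above is the heart of the new multi-model support: one `RubyLLM.chat` instance per requested model, with per-model failures collected instead of aborting the whole run. A minimal sketch of the same pattern outside AIA (the model names are hypothetical placeholders, not defaults from the gem):

```ruby
require 'ruby_llm'

requested = ['gpt-4o-mini', 'not-a-real-model'] # hypothetical model list
chats     = {}
failures  = []

requested.each do |name|
  chats[name] = RubyLLM.chat(model: name) # same call the adapter makes
rescue StandardError => e
  failures << "#{name}: #{e.message}"     # collect the failure, keep going
end

warn "Skipped: #{failures.join('; ')}" unless failures.empty?
abort 'No usable models' if chats.empty?  # only a total failure is fatal
```
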
@@ -113,11 +170,24 @@ module AIA
      end
 
 
+     def support_mcp_lazy
+       # Only load MCP tools if MCP servers are actually configured
+       return if AIA.config.mcp_servers.nil? || AIA.config.mcp_servers.empty?
+
+       begin
+         RubyLLM::MCP.establish_connection
+         @tools += RubyLLM::MCP.tools
+       rescue StandardError => e
+         warn "Warning: Failed to connect MCP clients: #{e.message}"
+       end
+     end
+
+
      def support_mcp
        RubyLLM::MCP.establish_connection
        @tools += RubyLLM::MCP.tools
-     rescue => e
-       STDERR.puts "Warning: Failed to connect MCP clients: #{e.message}"
+     rescue StandardError => e
+       warn "Warning: Failed to connect MCP clients: #{e.message}"
      end
 
 
@@ -128,7 +198,7 @@ module AIA
        @tools.select! do |tool|
          tool_name = tool.name
          if seen_names.include?(tool_name)
-           STDERR.puts "WARNING: Duplicate tool name detected: '#{tool_name}'. Only the first occurrence will be used."
+           warn "WARNING: Duplicate tool name detected: '#{tool_name}'. Only the first occurrence will be used."
            false
          else
            seen_names.add(tool_name)
@@ -137,110 +207,273 @@ module AIA
        end
 
        removed_count = original_size - @tools.size
-       STDERR.puts "Removed #{removed_count} duplicate tools" if removed_count > 0
+       warn "Removed #{removed_count} duplicate tools" if removed_count > 0
      end
 
 
-     # TODO: Need to rethink this dispatcher pattern w/r/t RubyLLM's capabilities
-     # This code was originally designed for AiClient
-     #
      def chat(prompt)
-       modes = @chat.model.modalities
+       if @models.size == 1
+         # Single model - use the original behavior
+         single_model_chat(prompt, @models.first)
+       else
+         # Multiple models - use concurrent processing
+         multi_model_chat(prompt)
+       end
+     end
+
+     def single_model_chat(prompt, model_name)
+       chat_instance = @chats[model_name]
+       modes = chat_instance.model.modalities
 
        # TODO: Need to consider how to handle multi-mode models
        if modes.text_to_text?
-         text_to_text(prompt)
-
+         text_to_text_single(prompt, model_name)
        elsif modes.image_to_text?
-         image_to_text(prompt)
+         image_to_text_single(prompt, model_name)
        elsif modes.text_to_image?
-         text_to_image(prompt)
-
+         text_to_image_single(prompt, model_name)
        elsif modes.text_to_audio?
-         text_to_audio(prompt)
+         text_to_audio_single(prompt, model_name)
        elsif modes.audio_to_text?
-         audio_to_text(prompt)
-
+         audio_to_text_single(prompt, model_name)
        else
          # TODO: what else can be done?
        end
      end
 
+     def multi_model_chat(prompt)
+       results = {}
+
+       Async do |task|
+         @models.each do |model_name|
+           task.async do
+             begin
+               result = single_model_chat(prompt, model_name)
+               results[model_name] = result
+             rescue StandardError => e
+               results[model_name] = "Error with #{model_name}: #{e.message}"
+             end
+           end
+         end
+       end
+
+       # Format and return results from all models
+       format_multi_model_results(results)
+     end
+
+     def format_multi_model_results(results)
+       use_consensus = should_use_consensus_mode?
+
+       if use_consensus
+         # Generate consensus response using primary model
+         generate_consensus_response(results)
+       else
+         # Show individual responses from all models
+         format_individual_responses(results)
+       end
+     end
+
+     def should_use_consensus_mode?
+       # Only use consensus when explicitly enabled with --consensus flag
+       AIA.config.consensus == true
+     end
+
+     def generate_consensus_response(results)
+       primary_model = @models.first
+       primary_chat = @chats[primary_model]
+
+       # Build the consensus prompt with all model responses
+       consensus_prompt = build_consensus_prompt(results)
+
+       begin
+         # Have the primary model generate the consensus
+         consensus_result = primary_chat.ask(consensus_prompt).content
+
+         # Format the consensus response
+         "from: #{primary_model} (consensus)\n#{consensus_result}"
+       rescue StandardError => e
+         # If consensus fails, fall back to individual responses
+         "Error generating consensus: #{e.message}\n\n" + format_individual_responses(results)
+       end
+     end
+
+     def build_consensus_prompt(results)
+       prompt_parts = []
+       prompt_parts << "You are tasked with creating a consensus response based on multiple AI model responses to the same query."
+       prompt_parts << "Please analyze the following responses and provide a unified, comprehensive answer that:"
+       prompt_parts << "- Incorporates the best insights from all models"
+       prompt_parts << "- Resolves any contradictions with clear reasoning"
+       prompt_parts << "- Provides additional context or clarification when helpful"
+       prompt_parts << "- Maintains accuracy and avoids speculation"
+       prompt_parts << ""
+       prompt_parts << "Model responses:"
+       prompt_parts << ""
+
+       results.each do |model_name, result|
+         # Extract content from RubyLLM::Message if needed
+         content = if result.respond_to?(:content)
+                     result.content
+                   else
+                     result.to_s
+                   end
+         next if content.start_with?("Error with")
+
+         prompt_parts << "#{model_name}:"
+         prompt_parts << content
+         prompt_parts << ""
+       end
+
+       prompt_parts << "Please provide your consensus response:"
+       prompt_parts.join("\n")
+     end
+
+     def format_individual_responses(results)
+       # For metrics support, return a special structure if all results have token info
+       has_metrics = results.values.all? { |r| r.respond_to?(:input_tokens) && r.respond_to?(:output_tokens) }
+
+       if has_metrics && AIA.config.show_metrics
+         # Return structured data that preserves metrics for multi-model
+         format_multi_model_with_metrics(results)
+       else
+         # Original string formatting for non-metrics mode
+         output = []
+         results.each do |model_name, result|
+           output << "from: #{model_name}"
+           # Extract content from RubyLLM::Message if needed
+           content = if result.respond_to?(:content)
+                       result.content
+                     else
+                       result.to_s
+                     end
+           output << content
+           output << "" # Add blank line between results
+         end
+         output.join("\n")
+       end
+     end
+
+     def format_multi_model_with_metrics(results)
+       # Create a composite response that includes all model responses and metrics
+       formatted_content = []
+       metrics_data = []
+
+       results.each do |model_name, result|
+         formatted_content << "from: #{model_name}"
+         formatted_content << result.content
+         formatted_content << ""
+
+         # Collect metrics for each model
+         metrics_data << {
+           model_id: model_name,
+           input_tokens: result.input_tokens,
+           output_tokens: result.output_tokens
+         }
+       end
+
+       # Return a special MultiModelResponse that ChatProcessorService can handle
+       MultiModelResponse.new(formatted_content.join("\n"), metrics_data)
+     end
+
+     # Helper class to carry multi-model response with metrics
+     class MultiModelResponse
+       attr_reader :content, :metrics_list
+
+       def initialize(content, metrics_list)
+         @content = content
+         @metrics_list = metrics_list
+       end
+
+       def multi_model?
+         true
+       end
+     end
+
+
      def transcribe(audio_file)
-       @chat.ask("Transcribe this audio", with: audio_file).content
+       # Use the first model for transcription
+       first_model = @models.first
+       @chats[first_model].ask('Transcribe this audio', with: audio_file).content
      end
 
-     def speak(text)
+
+     def speak(_text)
        output_file = "#{Time.now.to_i}.mp3"
 
-       # Note: RubyLLM doesn't have a direct text-to-speech feature
+       # NOTE: RubyLLM doesn't have a direct text-to-speech feature
        # This is a placeholder for a custom implementation or external service
        begin
          # Try using a TTS API if available
          # For now, we'll use a mock implementation
-         File.write(output_file, "Mock TTS audio content")
-         system("#{AIA.config.speak_command} #{output_file}") if File.exist?(output_file) && system("which #{AIA.config.speak_command} > /dev/null 2>&1")
+         File.write(output_file, 'Mock TTS audio content')
+         if File.exist?(output_file) && system("which #{AIA.config.speak_command} > /dev/null 2>&1")
+           system("#{AIA.config.speak_command} #{output_file}")
+         end
          "Audio generated and saved to: #{output_file}"
-       rescue => e
+       rescue StandardError => e
          "Error generating audio: #{e.message}"
        end
      end
 
+
      # Clear the chat context/history
      # Needed for the //clear directive
      def clear_context
-       begin
+       @chats.each do |model_name, chat|
          # Option 1: Directly clear the messages array in the current chat object
-         if @chat.instance_variable_defined?(:@messages)
-           old_messages = @chat.instance_variable_get(:@messages)
+         if chat.instance_variable_defined?(:@messages)
+           chat.instance_variable_get(:@messages)
            # Force a completely empty array, not just attempting to clear it
-           @chat.instance_variable_set(:@messages, [])
+           chat.instance_variable_set(:@messages, [])
          end
+       end
 
-         # Option 2: Force RubyLLM to create a new chat instance at the global level
-         # This ensures any shared state is reset
-         @provider, @model = extract_model_parts.values
-         RubyLLM.instance_variable_set(:@chat, nil) if RubyLLM.instance_variable_defined?(:@chat)
+       # Option 2: Force RubyLLM to create a new chat instance at the global level
+       # This ensures any shared state is reset
+       RubyLLM.instance_variable_set(:@chat, nil) if RubyLLM.instance_variable_defined?(:@chat)
 
-         # Option 3: Create a completely fresh chat instance for this adapter
-         @chat = nil # First nil it to help garbage collection
+       # Option 3: Create completely fresh chat instances for this adapter
+       @chats = {} # First nil the chats hash
 
-         begin
-           @chat = RubyLLM.chat(model: @model)
-         rescue => e
-           STDERR.puts "ERROR: #{e.message}"
-           exit 1
+       begin
+         @models.each do |model_name|
+           @chats[model_name] = RubyLLM.chat(model: model_name)
          end
+       rescue StandardError => e
+         warn "ERROR: #{e.message}"
+         exit 1
+       end
 
-         # Option 4: Call official clear_history method if it exists
-         if @chat.respond_to?(:clear_history)
-           @chat.clear_history
-         end
+       # Option 4: Call official clear_history method if it exists
+       @chats.each_value do |chat|
+         chat.clear_history if chat.respond_to?(:clear_history)
+       end
 
-         # Option 5: If chat has messages, force set it to empty again as a final check
-         if @chat.instance_variable_defined?(:@messages) && !@chat.instance_variable_get(:@messages).empty?
-           @chat.instance_variable_set(:@messages, [])
+       # Final verification
+       @chats.each_value do |chat|
+         if chat.instance_variable_defined?(:@messages) && !chat.instance_variable_get(:@messages).empty?
+           chat.instance_variable_set(:@messages, [])
          end
-
-         # Final verification
-         new_messages = @chat.instance_variable_defined?(:@messages) ? @chat.instance_variable_get(:@messages) : []
-
-         return "Chat context successfully cleared."
-       rescue => e
-         return "Error clearing chat context: #{e.message}"
        end
+
+       return 'Chat context successfully cleared.'
+     rescue StandardError => e
+       return "Error clearing chat context: #{e.message}"
      end
 
+
      def method_missing(method, *args, &block)
-       if @chat.respond_to?(method)
-         @chat.public_send(method, *args, &block)
+       # Use the first chat instance for backward compatibility with method_missing
+       first_chat = @chats.values.first
+       if first_chat&.respond_to?(method)
+         first_chat.public_send(method, *args, &block)
        else
          super
        end
      end
 
+
      def respond_to_missing?(method, include_private = false)
-       @chat.respond_to?(method) || super
+       # Check if any of our chat instances respond to the method
+       @chats.values.any? { |chat| chat.respond_to?(method) } || super
      end
 
      private
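
The fan-out in `multi_model_chat` is why `require 'async'` appears at the top of the file: each model's request runs in its own fiber-backed task, and the enclosing `Async` block returns only after every child task finishes. Because the async gem schedules fibers cooperatively on a single thread, the shared `results` hash needs no locking. A stripped-down sketch of the pattern (`slow_call` is a hypothetical stand-in for a model request):

```ruby
require 'async'

# Hypothetical stand-in for a per-model LLM request.
def slow_call(name)
  sleep 0.1 # non-blocking inside an Async task
  "reply from #{name}"
end

results = {}
Async do |task|
  ['model-a', 'model-b'].each do |name| # hypothetical model names
    task.async do
      results[name] = slow_call(name)
    rescue StandardError => e
      results[name] = "Error with #{name}: #{e.message}"
    end
  end
end # returns once all child tasks have completed

p results #=> {"model-a"=>"reply from model-a", "model-b"=>"reply from model-b"}
```
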
@@ -254,6 +487,7 @@ module AIA
        end
      end
 
+
      def filter_tools_by_rejected_list
        return if AIA.config.rejected_tools.nil?
 
@@ -263,24 +497,21 @@ module AIA
        end
      end
 
-     def extract_model_parts
-       parts = AIA.config.model.split('/')
-       parts.map!(&:strip)
 
-       if 2 == parts.length
-         provider = parts[0]
-         model = parts[1]
-       elsif 1 == parts.length
-         provider = nil # RubyLLM will figure it out from the model name
-         model = parts[0]
+     def extract_models_config
+       models_config = AIA.config.model
+
+       # Handle backward compatibility - if it's a string, convert to array
+       if models_config.is_a?(String)
+         [models_config]
+       elsif models_config.is_a?(Array)
+         models_config
        else
-         STDERR.puts "ERROR: malformed model name: #{AIA.config.model}"
-         exit 1
+         ['gpt-4o-mini'] # fallback to default
        end
-
-       { provider: provider, model: model }
      end
 
+
      def extract_text_prompt(prompt)
        if prompt.is_a?(String)
          prompt
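
`extract_models_config` replaces the old `provider/model` string parsing with a plain normalization: a string becomes a one-element array, an array passes through, and anything else falls back to the default. A hypothetical `normalize_models` mirroring that logic, with example results:

```ruby
# Mirrors extract_models_config (illustrative only, not AIA code).
def normalize_models(models_config)
  case models_config
  when String then [models_config]   # legacy single-model string
  when Array  then models_config     # new multi-model array
  else ['gpt-4o-mini']               # fallback to default
  end
end

normalize_models('gpt-4o-mini')                   #=> ["gpt-4o-mini"]
normalize_models(['gpt-4o-mini', 'other-model'])  #=> ["gpt-4o-mini", "other-model"]
normalize_models(nil)                             #=> ["gpt-4o-mini"]
```
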
@@ -297,16 +528,18 @@ module AIA
      #########################################
      ## text
 
-     def text_to_text(prompt)
+     def text_to_text_single(prompt, model_name)
+       chat_instance = @chats[model_name]
        text_prompt = extract_text_prompt(prompt)
-       response = if AIA.config.context_files.empty?
-                    @chat.ask(text_prompt)
-                  else
-                    @chat.ask(text_prompt, with: AIA.config.context_files)
-                  end
-
-       response.content
-     rescue => e
+       response = if AIA.config.context_files.empty?
+                    chat_instance.ask(text_prompt)
+                  else
+                    chat_instance.ask(text_prompt, with: AIA.config.context_files)
+                  end
+
+       # Return the full response object to preserve token information
+       response
+     rescue StandardError => e
        e.message
      end
 
@@ -316,16 +549,15 @@ module AIA
 
      def extract_image_path(prompt)
        if prompt.is_a?(String)
-         match = prompt.match(/\b[\w\/\.\-_]+?\.(jpg|jpeg|png|gif|webp)\b/i)
+         match = prompt.match(%r{\b[\w/.\-_]+?\.(jpg|jpeg|png|gif|webp)\b}i)
          match ? match[0] : nil
        elsif prompt.is_a?(Hash)
          prompt[:image] || prompt[:image_path]
-       else
-         nil
        end
      end
 
-     def text_to_image(prompt)
+
+     def text_to_image_single(prompt, model_name)
        text_prompt = extract_text_prompt(prompt)
        image_name = extract_image_path(text_prompt)
 
@@ -337,23 +569,24 @@ module AIA
          else
            "Image generated and available at: #{image.url}"
          end
-       rescue => e
+       rescue StandardError => e
          "Error generating image: #{e.message}"
        end
      end
 
-     def image_to_text(prompt)
+
+     def image_to_text_single(prompt, model_name)
        image_path = extract_image_path(prompt)
        text_prompt = extract_text_prompt(prompt)
 
        if image_path && File.exist?(image_path)
          begin
-           @chat.ask(text_prompt, with: image_path).content
-         rescue => e
+           @chats[model_name].ask(text_prompt, with: image_path).content
+         rescue StandardError => e
            "Error analyzing image: #{e.message}"
          end
        else
-         text_to_text(prompt)
+         text_to_text_single(prompt, model_name)
        end
      end
 
@@ -365,22 +598,26 @@ module AIA
        filepath.to_s.downcase.end_with?('.mp3', '.wav', '.m4a', '.flac')
      end
 
-     def text_to_audio(prompt)
+
+     def text_to_audio_single(prompt, model_name)
        text_prompt = extract_text_prompt(prompt)
        output_file = "#{Time.now.to_i}.mp3"
 
        begin
-         # Note: RubyLLM doesn't have a direct TTS feature
+         # NOTE: RubyLLM doesn't have a direct TTS feature
          # TODO: This is a placeholder for a custom implementation
          File.write(output_file, text_prompt)
-         system("#{AIA.config.speak_command} #{output_file}") if File.exist?(output_file) && system("which #{AIA.config.speak_command} > /dev/null 2>&1")
+         if File.exist?(output_file) && system("which #{AIA.config.speak_command} > /dev/null 2>&1")
+           system("#{AIA.config.speak_command} #{output_file}")
+         end
          "Audio generated and saved to: #{output_file}"
-       rescue => e
+       rescue StandardError => e
          "Error generating audio: #{e.message}"
        end
      end
 
-     def audio_to_text(prompt)
+
+     def audio_to_text_single(prompt, model_name)
        text_prompt = extract_text_prompt(prompt)
        text_prompt = 'Transcribe this audio' if text_prompt.nil? || text_prompt.empty?
 
@@ -391,18 +628,18 @@ module AIA
           File.exist?(prompt) &&
           audio_file?(prompt)
          begin
-           response = if AIA.config.context_files.empty?
-                        @chat.ask(text_prompt)
-                      else
-                        @chat.ask(text_prompt, with: AIA.config.context_files)
-                      end
+           response = if AIA.config.context_files.empty?
+                        @chats[model_name].ask(text_prompt)
+                      else
+                        @chats[model_name].ask(text_prompt, with: AIA.config.context_files)
+                      end
            response.content
-         rescue => e
+         rescue StandardError => e
            "Error transcribing audio: #{e.message}"
          end
        else
          # Fall back to regular chat if no valid audio file is found
-         text_to_text(prompt)
+         text_to_text_single(prompt, model_name)
        end
      end
    end
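
A closing note on `MultiModelResponse`: it duck-types just enough (`content`, `metrics_list`, and a `multi_model?` flag) for the updated `chat_processor_service.rb` to detect combined output and report per-model token counts. A sketch of how a consumer might branch on it (this `render` helper is hypothetical, not AIA code):

```ruby
# Hypothetical consumer of the adapter's return values.
def render(response)
  puts response.respond_to?(:content) ? response.content : response.to_s

  return unless response.respond_to?(:multi_model?) && response.multi_model?

  response.metrics_list.each do |m|
    # per-model token data preserved by format_multi_model_with_metrics
    puts "#{m[:model_id]}: #{m[:input_tokens]} in / #{m[:output_tokens]} out"
  end
end
```
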