aia 0.9.11 → 0.9.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. checksums.yaml +4 -4
  2. data/.version +1 -1
  3. data/CHANGELOG.md +66 -2
  4. data/README.md +133 -4
  5. data/docs/advanced-prompting.md +721 -0
  6. data/docs/cli-reference.md +582 -0
  7. data/docs/configuration.md +347 -0
  8. data/docs/contributing.md +332 -0
  9. data/docs/directives-reference.md +490 -0
  10. data/docs/examples/index.md +277 -0
  11. data/docs/examples/mcp/index.md +479 -0
  12. data/docs/examples/prompts/analysis/index.md +78 -0
  13. data/docs/examples/prompts/automation/index.md +108 -0
  14. data/docs/examples/prompts/development/index.md +125 -0
  15. data/docs/examples/prompts/index.md +333 -0
  16. data/docs/examples/prompts/learning/index.md +127 -0
  17. data/docs/examples/prompts/writing/index.md +62 -0
  18. data/docs/examples/tools/index.md +292 -0
  19. data/docs/faq.md +414 -0
  20. data/docs/guides/available-models.md +366 -0
  21. data/docs/guides/basic-usage.md +477 -0
  22. data/docs/guides/chat.md +474 -0
  23. data/docs/guides/executable-prompts.md +417 -0
  24. data/docs/guides/first-prompt.md +454 -0
  25. data/docs/guides/getting-started.md +455 -0
  26. data/docs/guides/image-generation.md +507 -0
  27. data/docs/guides/index.md +46 -0
  28. data/docs/guides/models.md +507 -0
  29. data/docs/guides/tools.md +856 -0
  30. data/docs/index.md +173 -0
  31. data/docs/installation.md +238 -0
  32. data/docs/mcp-integration.md +612 -0
  33. data/docs/prompt_management.md +579 -0
  34. data/docs/security.md +629 -0
  35. data/docs/tools-and-mcp-examples.md +1186 -0
  36. data/docs/workflows-and-pipelines.md +563 -0
  37. data/examples/tools/mcp/github_mcp_server.json +11 -0
  38. data/examples/tools/mcp/imcp.json +7 -0
  39. data/lib/aia/chat_processor_service.rb +19 -3
  40. data/lib/aia/config/base.rb +224 -0
  41. data/lib/aia/config/cli_parser.rb +409 -0
  42. data/lib/aia/config/defaults.rb +88 -0
  43. data/lib/aia/config/file_loader.rb +131 -0
  44. data/lib/aia/config/validator.rb +184 -0
  45. data/lib/aia/config.rb +10 -860
  46. data/lib/aia/directive_processor.rb +27 -372
  47. data/lib/aia/directives/configuration.rb +114 -0
  48. data/lib/aia/directives/execution.rb +37 -0
  49. data/lib/aia/directives/models.rb +178 -0
  50. data/lib/aia/directives/registry.rb +120 -0
  51. data/lib/aia/directives/utility.rb +70 -0
  52. data/lib/aia/directives/web_and_file.rb +71 -0
  53. data/lib/aia/prompt_handler.rb +23 -3
  54. data/lib/aia/ruby_llm_adapter.rb +307 -128
  55. data/lib/aia/session.rb +27 -14
  56. data/lib/aia/utility.rb +12 -8
  57. data/lib/aia.rb +11 -2
  58. data/lib/extensions/ruby_llm/.irbrc +56 -0
  59. data/mkdocs.yml +165 -0
  60. metadata +77 -20
  61. /data/{images → docs/assets/images}/aia.png +0 -0
data/lib/aia/ruby_llm_adapter.rb

@@ -1,39 +1,48 @@
 # lib/aia/ruby_llm_adapter.rb
 
+require 'async'
+
 module AIA
   class RubyLLMAdapter
     attr_reader :tools
 
     def initialize
-      @provider, @model = extract_model_parts.values
+      @models = extract_models_config
+      @chats = {}
 
      configure_rubyllm
      refresh_local_model_registry
-      setup_chat_with_tools
+      setup_chats_with_tools
    end
 
+
    def configure_rubyllm
      # TODO: Add some of these configuration items to AIA.config
      RubyLLM.configure do |config|
-        config.openai_api_key = ENV.fetch('OPENAI_API_KEY', nil)
-        config.openai_organization_id = ENV.fetch('OPENAI_ORGANIZATION_ID', nil)
-        config.openai_project_id = ENV.fetch('OPENAI_PROJECT_ID', nil)
-
        config.anthropic_api_key = ENV.fetch('ANTHROPIC_API_KEY', nil)
-        config.gemini_api_key = ENV.fetch('GEMINI_API_KEY', nil)
        config.deepseek_api_key = ENV.fetch('DEEPSEEK_API_KEY', nil)
+        config.gemini_api_key = ENV.fetch('GEMINI_API_KEY', nil)
+        config.gpustack_api_key = ENV.fetch('GPUSTACK_API_KEY', nil)
+        config.mistral_api_key = ENV.fetch('MISTRAL_API_KEY', nil)
        config.openrouter_api_key = ENV.fetch('OPENROUTER_API_KEY', nil)
+        config.perplexity_api_key = ENV.fetch('PERPLEXITY_API_KEY', nil)
+
+        # These providers require a little something extra
+        config.openai_api_key = ENV.fetch('OPENAI_API_KEY', nil)
+        config.openai_organization_id = ENV.fetch('OPENAI_ORGANIZATION_ID', nil)
+        config.openai_project_id = ENV.fetch('OPENAI_PROJECT_ID', nil)
 
        config.bedrock_api_key = ENV.fetch('BEDROCK_ACCESS_KEY_ID', nil)
        config.bedrock_secret_key = ENV.fetch('BEDROCK_SECRET_ACCESS_KEY', nil)
        config.bedrock_region = ENV.fetch('BEDROCK_REGION', nil)
        config.bedrock_session_token = ENV.fetch('BEDROCK_SESSION_TOKEN', nil)
 
-        config.ollama_api_base = ENV.fetch('OLLAMA_API_BASE', nil)
+        # Ollama is based upon the OpenAI API so it needs to over-ride a few things
+        config.ollama_api_base = ENV.fetch('OLLAMA_API_BASE', nil)
 
        # --- Custom OpenAI Endpoint ---
        # Use this for Azure OpenAI, proxies, or self-hosted models via OpenAI-compatible APIs.
-        config.openai_api_base = ENV.fetch('OPENAI_API_BASE', nil) # e.g., "https://your-azure.openai.azure.com"
+        config.openai_api_base = ENV.fetch('OPENAI_API_BASE', nil) # e.g., "https://your-azure.openai.azure.com"
 
        # --- Default Models ---
        # Used by RubyLLM.chat, RubyLLM.embed, RubyLLM.paint if no model is specified.
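
The configure block above now registers keys for several more providers; GPUStack, Mistral, and Perplexity are the new ENV lookups in this hunk. A minimal pre-flight check, assuming you only care about the three new variables (the names match the ENV.fetch calls above; the check itself is illustrative, not part of the gem):

    # Warn about any of the newly supported provider keys that are unset.
    %w[GPUSTACK_API_KEY MISTRAL_API_KEY PERPLEXITY_API_KEY].each do |key|
      warn "#{key} is not set" unless ENV.key?(key)
    end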
@@ -42,49 +51,97 @@ module AIA
      # config.default_image_model = 'dall-e-3' # Default: 'dall-e-3'
 
      # --- Connection Settings ---
-      # config.request_timeout = 120 # Request timeout in seconds (default: 120)
-      # config.max_retries = 3 # Max retries on transient network errors (default: 3)
-      # config.retry_interval = 0.1 # Initial delay in seconds (default: 0.1)
-      # config.retry_backoff_factor = 2 # Multiplier for subsequent retries (default: 2)
-      # config.retry_interval_randomness = 0.5 # Jitter factor (default: 0.5)
-
-      # --- Logging Settings ---
+      config.request_timeout = 120 # Request timeout in seconds (default: 120)
+      config.max_retries = 3 # Max retries on transient network errors (default: 3)
+      config.retry_interval = 0.1 # Initial delay in seconds (default: 0.1)
+      config.retry_backoff_factor = 2 # Multiplier for subsequent retries (default: 2)
+      config.retry_interval_randomness = 0.5 # Jitter factor (default: 0.5)
+
+      # Connection pooling settings removed - not supported in current RubyLLM version
+      # config.connection_pool_size = 10 # Number of connections to maintain in pool
+      # config.connection_pool_timeout = 60 # Connection pool timeout in seconds
      # config.log_file = '/logs/ruby_llm.log'
-      config.log_level = :fatal # debug level can also be set to debug by setting RUBYLLM_DEBUG envar to true
+      config.log_level = :fatal # debug level can also be set to debug by setting RUBYLLM_DEBUG envar to true
    end
  end
 
+
    def refresh_local_model_registry
      if AIA.config.refresh.nil? ||
         Integer(AIA.config.refresh).zero? ||
         Date.today > (AIA.config.last_refresh + Integer(AIA.config.refresh))
        RubyLLM.models.refresh!
        AIA.config.last_refresh = Date.today
-        if AIA.config.config_file
-          AIA::Config.dump_config(AIA.config, AIA.config.config_file)
-        end
+        AIA::Config.dump_config(AIA.config, AIA.config.config_file) if AIA.config.config_file
      end
    end
 
 
-    def setup_chat_with_tools
-      begin
-        @chat = RubyLLM.chat(model: @model)
-        @model = @chat.model.name if @model.nil? # using default model
-      rescue => e
-        STDERR.puts "ERROR: #{e.message}"
+    def setup_chats_with_tools
+      valid_chats = {}
+      failed_models = []
+
+      @models.each do |model_name|
+        begin
+          chat = RubyLLM.chat(model: model_name)
+          valid_chats[model_name] = chat
+        rescue StandardError => e
+          failed_models << "#{model_name}: #{e.message}"
+        end
+      end
+
+      # Report failed models but continue with valid ones
+      unless failed_models.empty?
+        puts "\n❌ Failed to initialize the following models:"
+        failed_models.each { |failure| puts "  - #{failure}" }
+      end
+
+      # If no models initialized successfully, exit
+      if valid_chats.empty?
+        puts "\n❌ No valid models could be initialized. Exiting."
+        puts "\n💡 Available models can be listed with: bin/aia --help models"
        exit 1
      end
 
-      unless @chat.model.supports_functions?
-        AIA.config.tools = []
-        AIA.config.tool_names = ""
-        return
+      @chats = valid_chats
+      @models = valid_chats.keys
+
+      # Update the config to reflect only the valid models
+      AIA.config.model = @models
+
+      # Report successful models
+      if failed_models.any?
+        puts "\n✅ Successfully initialized: #{@models.join(', ')}"
+        puts
+      end
+
+      # Use the first chat to determine tool support (assuming all models have similar tool support)
+      first_chat = @chats.values.first
+      return unless first_chat&.model&.supports_functions?
+
+      load_tools_lazy_mcp_support_only_when_needed
+
+      @chats.each_value do |chat|
+        chat.with_tools(*tools) unless tools.empty?
      end
+    end
+
 
-      load_tools
+    def load_tools_lazy_mcp_support_only_when_needed
+      @tools = []
+
+      support_local_tools
+      support_mcp_lazy
+      filter_tools_by_allowed_list
+      filter_tools_by_rejected_list
+      drop_duplicate_tools
 
-      @chat.with_tools(*tools) unless tools.empty?
+      if tools.empty?
+        AIA.config.tool_names = ''
+      else
+        AIA.config.tool_names = @tools.map(&:name).join(', ')
+        AIA.config.tools = @tools
+      end
    end
 
 
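
setup_chats_with_tools replaces the single-chat setup: it builds one RubyLLM chat per configured model, reports failures, and keeps only the models that initialized. A sketch of the resulting behavior (model names are placeholders and the printed text is abridged from the code above):

    AIA.config.model = ['gpt-4o-mini', 'not-a-real-model']
    adapter = AIA::RubyLLMAdapter.new
    # prints: ❌ Failed to initialize the following models:
    #           - not-a-real-model: ...
    # prints: ✅ Successfully initialized: gpt-4o-mini
    AIA.config.model  # => ["gpt-4o-mini"], trimmed to the valid models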
@@ -98,7 +155,7 @@ module AIA
      drop_duplicate_tools
 
      if tools.empty?
-        AIA.config.tool_names = ""
+        AIA.config.tool_names = ''
      else
        AIA.config.tool_names = @tools.map(&:name).join(', ')
        AIA.config.tools = @tools
@@ -113,11 +170,24 @@ module AIA
    end
 
 
+    def support_mcp_lazy
+      # Only load MCP tools if MCP servers are actually configured
+      return if AIA.config.mcp_servers.nil? || AIA.config.mcp_servers.empty?
+
+      begin
+        RubyLLM::MCP.establish_connection
+        @tools += RubyLLM::MCP.tools
+      rescue StandardError => e
+        warn "Warning: Failed to connect MCP clients: #{e.message}"
+      end
+    end
+
+
    def support_mcp
      RubyLLM::MCP.establish_connection
      @tools += RubyLLM::MCP.tools
-    rescue => e
-      STDERR.puts "Warning: Failed to connect MCP clients: #{e.message}"
+    rescue StandardError => e
+      warn "Warning: Failed to connect MCP clients: #{e.message}"
    end
 
 
@@ -128,7 +198,7 @@ module AIA
      @tools.select! do |tool|
        tool_name = tool.name
        if seen_names.include?(tool_name)
-          STDERR.puts "WARNING: Duplicate tool name detected: '#{tool_name}'. Only the first occurrence will be used."
+          warn "WARNING: Duplicate tool name detected: '#{tool_name}'. Only the first occurrence will be used."
          false
        else
          seen_names.add(tool_name)
@@ -137,110 +207,216 @@ module AIA
      end
 
      removed_count = original_size - @tools.size
-      STDERR.puts "Removed #{removed_count} duplicate tools" if removed_count > 0
+      warn "Removed #{removed_count} duplicate tools" if removed_count > 0
    end
 
 
-    # TODO: Need to rethink this dispatcher pattern w/r/t RubyLLM's capabilities
-    # This code was originally designed for AiClient
-    #
    def chat(prompt)
-      modes = @chat.model.modalities
+      if @models.size == 1
+        # Single model - use the original behavior
+        single_model_chat(prompt, @models.first)
+      else
+        # Multiple models - use concurrent processing
+        multi_model_chat(prompt)
+      end
+    end
+
+    def single_model_chat(prompt, model_name)
+      chat_instance = @chats[model_name]
+      modes = chat_instance.model.modalities
 
      # TODO: Need to consider how to handle multi-mode models
      if modes.text_to_text?
-        text_to_text(prompt)
-
+        text_to_text_single(prompt, model_name)
      elsif modes.image_to_text?
-        image_to_text(prompt)
+        image_to_text_single(prompt, model_name)
      elsif modes.text_to_image?
-        text_to_image(prompt)
-
+        text_to_image_single(prompt, model_name)
      elsif modes.text_to_audio?
-        text_to_audio(prompt)
+        text_to_audio_single(prompt, model_name)
      elsif modes.audio_to_text?
-        audio_to_text(prompt)
-
+        audio_to_text_single(prompt, model_name)
      else
        # TODO: what else can be done?
      end
    end
 
+    def multi_model_chat(prompt)
+      results = {}
+
+      Async do |task|
+        @models.each do |model_name|
+          task.async do
+            begin
+              result = single_model_chat(prompt, model_name)
+              results[model_name] = result
+            rescue StandardError => e
+              results[model_name] = "Error with #{model_name}: #{e.message}"
+            end
+          end
+        end
+      end
+
+      # Format and return results from all models
+      format_multi_model_results(results)
+    end
+
+    def format_multi_model_results(results)
+      use_consensus = should_use_consensus_mode?
+
+      if use_consensus
+        # Generate consensus response using primary model
+        generate_consensus_response(results)
+      else
+        # Show individual responses from all models
+        format_individual_responses(results)
+      end
+    end
+
+    def should_use_consensus_mode?
+      # Only use consensus when explicitly enabled with --consensus flag
+      AIA.config.consensus == true
+    end
+
+    def generate_consensus_response(results)
+      primary_model = @models.first
+      primary_chat = @chats[primary_model]
+
+      # Build the consensus prompt with all model responses
+      consensus_prompt = build_consensus_prompt(results)
+
+      begin
+        # Have the primary model generate the consensus
+        consensus_result = primary_chat.ask(consensus_prompt).content
+
+        # Format the consensus response
+        "from: #{primary_model} (consensus)\n#{consensus_result}"
+      rescue StandardError => e
+        # If consensus fails, fall back to individual responses
+        "Error generating consensus: #{e.message}\n\n" + format_individual_responses(results)
+      end
+    end
+
+    def build_consensus_prompt(results)
+      prompt_parts = []
+      prompt_parts << "You are tasked with creating a consensus response based on multiple AI model responses to the same query."
+      prompt_parts << "Please analyze the following responses and provide a unified, comprehensive answer that:"
+      prompt_parts << "- Incorporates the best insights from all models"
+      prompt_parts << "- Resolves any contradictions with clear reasoning"
+      prompt_parts << "- Provides additional context or clarification when helpful"
+      prompt_parts << "- Maintains accuracy and avoids speculation"
+      prompt_parts << ""
+      prompt_parts << "Model responses:"
+      prompt_parts << ""
+
+      results.each do |model_name, result|
+        next if result.to_s.start_with?("Error with")
+        prompt_parts << "#{model_name}:"
+        prompt_parts << result.to_s
+        prompt_parts << ""
+      end
+
+      prompt_parts << "Please provide your consensus response:"
+      prompt_parts.join("\n")
+    end
+
+    def format_individual_responses(results)
+      output = []
+      results.each do |model_name, result|
+        output << "from: #{model_name}"
+        output << result
+        output << "" # Add blank line between results
+      end
+      output.join("\n")
+    end
+
+
    def transcribe(audio_file)
-      @chat.ask("Transcribe this audio", with: audio_file).content
+      # Use the first model for transcription
+      first_model = @models.first
+      @chats[first_model].ask('Transcribe this audio', with: audio_file).content
    end
 
-    def speak(text)
+
+    def speak(_text)
      output_file = "#{Time.now.to_i}.mp3"
 
-      # Note: RubyLLM doesn't have a direct text-to-speech feature
+      # NOTE: RubyLLM doesn't have a direct text-to-speech feature
      # This is a placeholder for a custom implementation or external service
      begin
        # Try using a TTS API if available
        # For now, we'll use a mock implementation
-        File.write(output_file, "Mock TTS audio content")
-        system("#{AIA.config.speak_command} #{output_file}") if File.exist?(output_file) && system("which #{AIA.config.speak_command} > /dev/null 2>&1")
+        File.write(output_file, 'Mock TTS audio content')
+        if File.exist?(output_file) && system("which #{AIA.config.speak_command} > /dev/null 2>&1")
+          system("#{AIA.config.speak_command} #{output_file}")
+        end
        "Audio generated and saved to: #{output_file}"
-      rescue => e
+      rescue StandardError => e
        "Error generating audio: #{e.message}"
      end
    end
 
+
    # Clear the chat context/history
    # Needed for the //clear directive
    def clear_context
-      begin
+      @chats.each do |model_name, chat|
        # Option 1: Directly clear the messages array in the current chat object
-        if @chat.instance_variable_defined?(:@messages)
-          old_messages = @chat.instance_variable_get(:@messages)
+        if chat.instance_variable_defined?(:@messages)
+          chat.instance_variable_get(:@messages)
          # Force a completely empty array, not just attempting to clear it
-          @chat.instance_variable_set(:@messages, [])
+          chat.instance_variable_set(:@messages, [])
        end
+      end
 
-        # Option 2: Force RubyLLM to create a new chat instance at the global level
-        # This ensures any shared state is reset
-        @provider, @model = extract_model_parts.values
-        RubyLLM.instance_variable_set(:@chat, nil) if RubyLLM.instance_variable_defined?(:@chat)
+      # Option 2: Force RubyLLM to create a new chat instance at the global level
+      # This ensures any shared state is reset
+      RubyLLM.instance_variable_set(:@chat, nil) if RubyLLM.instance_variable_defined?(:@chat)
 
-        # Option 3: Create a completely fresh chat instance for this adapter
-        @chat = nil # First nil it to help garbage collection
+      # Option 3: Create completely fresh chat instances for this adapter
+      @chats = {} # First nil the chats hash
 
-        begin
-          @chat = RubyLLM.chat(model: @model)
-        rescue => e
-          STDERR.puts "ERROR: #{e.message}"
-          exit 1
+      begin
+        @models.each do |model_name|
+          @chats[model_name] = RubyLLM.chat(model: model_name)
        end
+      rescue StandardError => e
+        warn "ERROR: #{e.message}"
+        exit 1
+      end
 
-        # Option 4: Call official clear_history method if it exists
-        if @chat.respond_to?(:clear_history)
-          @chat.clear_history
-        end
+      # Option 4: Call official clear_history method if it exists
+      @chats.each_value do |chat|
+        chat.clear_history if chat.respond_to?(:clear_history)
+      end
 
-        # Option 5: If chat has messages, force set it to empty again as a final check
-        if @chat.instance_variable_defined?(:@messages) && !@chat.instance_variable_get(:@messages).empty?
-          @chat.instance_variable_set(:@messages, [])
+      # Final verification
+      @chats.each_value do |chat|
+        if chat.instance_variable_defined?(:@messages) && !chat.instance_variable_get(:@messages).empty?
+          chat.instance_variable_set(:@messages, [])
        end
-
-        # Final verification
-        new_messages = @chat.instance_variable_defined?(:@messages) ? @chat.instance_variable_get(:@messages) : []
-
-        return "Chat context successfully cleared."
-      rescue => e
-        return "Error clearing chat context: #{e.message}"
      end
+
+      return 'Chat context successfully cleared.'
+    rescue StandardError => e
+      return "Error clearing chat context: #{e.message}"
    end
 
+
    def method_missing(method, *args, &block)
-      if @chat.respond_to?(method)
-        @chat.public_send(method, *args, &block)
+      # Use the first chat instance for backward compatibility with method_missing
+      first_chat = @chats.values.first
+      if first_chat&.respond_to?(method)
+        first_chat.public_send(method, *args, &block)
      else
        super
      end
    end
 
+
    def respond_to_missing?(method, include_private = false)
-      @chat.respond_to?(method) || super
+      # Check if any of our chat instances respond to the method
+      @chats.values.any? { |chat| chat.respond_to?(method) } || super
    end
 
    private
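
chat now branches on the number of models: one model keeps the original synchronous path, while several fan out concurrently through multi_model_chat and are merged by format_multi_model_results. The two output shapes implied by the code above, given an adapter initialized with two placeholder models (responses are illustrative):

    # Without consensus: each model's answer under a "from:" header.
    adapter.chat('What is 2 + 2?')
    # => "from: gpt-4o-mini\n4\n\nfrom: claude-3-haiku\n2 + 2 = 4\n"

    # With AIA.config.consensus = true (the --consensus flag): the first
    # (primary) model merges all answers into a single response.
    # => "from: gpt-4o-mini (consensus)\n2 + 2 equals 4."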
@@ -254,6 +430,7 @@ module AIA
      end
    end
 
+
    def filter_tools_by_rejected_list
      return if AIA.config.rejected_tools.nil?
 
@@ -263,24 +440,21 @@ module AIA
      end
    end
 
-    def extract_model_parts
-      parts = AIA.config.model.split('/')
-      parts.map!(&:strip)
 
-      if 2 == parts.length
-        provider = parts[0]
-        model = parts[1]
-      elsif 1 == parts.length
-        provider = nil # RubyLLM will figure it out from the model name
-        model = parts[0]
+    def extract_models_config
+      models_config = AIA.config.model
+
+      # Handle backward compatibility - if it's a string, convert to array
+      if models_config.is_a?(String)
+        [models_config]
+      elsif models_config.is_a?(Array)
+        models_config
      else
-        STDERR.puts "ERROR: malformed model name: #{AIA.config.model}"
-        exit 1
+        ['gpt-4o-mini'] # fallback to default
      end
-
-      { provider: provider, model: model }
    end
 
+
    def extract_text_prompt(prompt)
      if prompt.is_a?(String)
        prompt
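
extract_models_config, added in the hunk above, drops the old provider/model string parsing in favor of a shape check on AIA.config.model. Its normalization, restated as a standalone sketch (the method itself lives on the adapter; this copy is only for illustration):

    def normalize_models(value)
      case value
      when String then [value]   # "gpt-4o-mini"  -> ["gpt-4o-mini"]
      when Array  then value     # already a list -> unchanged
      else ['gpt-4o-mini']       # anything else  -> built-in default
      end
    end

    normalize_models('gpt-4o')            # => ["gpt-4o"]
    normalize_models(%w[gpt-4o gemini])   # => ["gpt-4o", "gemini"]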
@@ -297,16 +471,17 @@
    #########################################
    ## text
 
-    def text_to_text(prompt)
+    def text_to_text_single(prompt, model_name)
+      chat_instance = @chats[model_name]
      text_prompt = extract_text_prompt(prompt)
-      response = if AIA.config.context_files.empty?
-                   @chat.ask(text_prompt)
-                 else
-                   @chat.ask(text_prompt, with: AIA.config.context_files)
-                 end
+      response = if AIA.config.context_files.empty?
+                   chat_instance.ask(text_prompt)
+                 else
+                   chat_instance.ask(text_prompt, with: AIA.config.context_files)
+                 end
 
      response.content
-    rescue => e
+    rescue StandardError => e
      e.message
    end
 
@@ -316,16 +491,15 @@
 
    def extract_image_path(prompt)
      if prompt.is_a?(String)
-        match = prompt.match(/\b[\w\/\.\-_]+?\.(jpg|jpeg|png|gif|webp)\b/i)
+        match = prompt.match(%r{\b[\w/.\-_]+?\.(jpg|jpeg|png|gif|webp)\b}i)
        match ? match[0] : nil
      elsif prompt.is_a?(Hash)
        prompt[:image] || prompt[:image_path]
-      else
-        nil
      end
    end
 
-    def text_to_image(prompt)
+
+    def text_to_image_single(prompt, model_name)
      text_prompt = extract_text_prompt(prompt)
      image_name = extract_image_path(text_prompt)
 
@@ -337,23 +511,24 @@
        else
          "Image generated and available at: #{image.url}"
        end
-      rescue => e
+      rescue StandardError => e
        "Error generating image: #{e.message}"
      end
    end
 
-    def image_to_text(prompt)
+
+    def image_to_text_single(prompt, model_name)
      image_path = extract_image_path(prompt)
      text_prompt = extract_text_prompt(prompt)
 
      if image_path && File.exist?(image_path)
        begin
-          @chat.ask(text_prompt, with: image_path).content
-        rescue => e
+          @chats[model_name].ask(text_prompt, with: image_path).content
+        rescue StandardError => e
          "Error analyzing image: #{e.message}"
        end
      else
-        text_to_text(prompt)
+        text_to_text_single(prompt, model_name)
      end
    end
 
@@ -365,22 +540,26 @@
      filepath.to_s.downcase.end_with?('.mp3', '.wav', '.m4a', '.flac')
    end
 
-    def text_to_audio(prompt)
+
+    def text_to_audio_single(prompt, model_name)
      text_prompt = extract_text_prompt(prompt)
      output_file = "#{Time.now.to_i}.mp3"
 
      begin
-        # Note: RubyLLM doesn't have a direct TTS feature
+        # NOTE: RubyLLM doesn't have a direct TTS feature
        # TODO: This is a placeholder for a custom implementation
        File.write(output_file, text_prompt)
-        system("#{AIA.config.speak_command} #{output_file}") if File.exist?(output_file) && system("which #{AIA.config.speak_command} > /dev/null 2>&1")
+        if File.exist?(output_file) && system("which #{AIA.config.speak_command} > /dev/null 2>&1")
+          system("#{AIA.config.speak_command} #{output_file}")
+        end
        "Audio generated and saved to: #{output_file}"
-      rescue => e
+      rescue StandardError => e
        "Error generating audio: #{e.message}"
      end
    end
 
-    def audio_to_text(prompt)
+
+    def audio_to_text_single(prompt, model_name)
      text_prompt = extract_text_prompt(prompt)
      text_prompt = 'Transcribe this audio' if text_prompt.nil? || text_prompt.empty?
 
@@ -391,18 +570,18 @@
         File.exist?(prompt) &&
         audio_file?(prompt)
        begin
-          response = if AIA.config.context_files.empty?
-                       @chat.ask(text_prompt)
-                     else
-                       @chat.ask(text_prompt, with: AIA.config.context_files)
-                     end
+          response = if AIA.config.context_files.empty?
+                       @chats[model_name].ask(text_prompt)
+                     else
+                       @chats[model_name].ask(text_prompt, with: AIA.config.context_files)
+                     end
          response.content
-        rescue => e
+        rescue StandardError => e
          "Error transcribing audio: #{e.message}"
        end
      else
        # Fall back to regular chat if no valid audio file is found
-        text_to_text(prompt)
+        text_to_text_single(prompt, model_name)
      end
    end
  end
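
The concurrency in multi_model_chat comes from the async gem required at the top of the file: the outer Async block starts a reactor and only returns once every child task spawned with task.async has finished. A self-contained sketch of that fan-out/join pattern, with placeholder work standing in for the LLM calls:

    require 'async'

    results = {}
    Async do |task|
      %w[model-a model-b].each do |name|
        task.async do
          # Stand-in for a per-model request; in the adapter each task
          # rescues its own errors so one failure cannot sink the rest.
          results[name] = "response from #{name}"
        end
      end
    end
    # Reached only after both tasks complete.
    p results # => {"model-a"=>"response from model-a", "model-b"=>"response from model-b"}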