aia 0.9.23 → 0.10.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. checksums.yaml +4 -4
  2. data/.version +1 -1
  3. data/CHANGELOG.md +95 -3
  4. data/README.md +187 -60
  5. data/bin/aia +6 -0
  6. data/docs/cli-reference.md +145 -72
  7. data/docs/configuration.md +156 -19
  8. data/docs/directives-reference.md +28 -8
  9. data/docs/examples/tools/index.md +2 -2
  10. data/docs/faq.md +11 -11
  11. data/docs/guides/available-models.md +11 -11
  12. data/docs/guides/basic-usage.md +18 -17
  13. data/docs/guides/chat.md +57 -11
  14. data/docs/guides/executable-prompts.md +15 -15
  15. data/docs/guides/first-prompt.md +2 -2
  16. data/docs/guides/getting-started.md +6 -6
  17. data/docs/guides/image-generation.md +24 -24
  18. data/docs/guides/local-models.md +2 -2
  19. data/docs/guides/models.md +96 -18
  20. data/docs/guides/tools.md +4 -4
  21. data/docs/index.md +2 -2
  22. data/docs/installation.md +2 -2
  23. data/docs/prompt_management.md +11 -11
  24. data/docs/security.md +3 -3
  25. data/docs/workflows-and-pipelines.md +1 -1
  26. data/examples/README.md +6 -6
  27. data/examples/headlines +3 -3
  28. data/lib/aia/aia_completion.bash +2 -2
  29. data/lib/aia/aia_completion.fish +4 -4
  30. data/lib/aia/aia_completion.zsh +2 -2
  31. data/lib/aia/chat_processor_service.rb +31 -21
  32. data/lib/aia/config/cli_parser.rb +403 -403
  33. data/lib/aia/config/config_section.rb +87 -0
  34. data/lib/aia/config/defaults.yml +219 -0
  35. data/lib/aia/config/defaults_loader.rb +147 -0
  36. data/lib/aia/config/mcp_parser.rb +151 -0
  37. data/lib/aia/config/model_spec.rb +67 -0
  38. data/lib/aia/config/validator.rb +185 -136
  39. data/lib/aia/config.rb +336 -17
  40. data/lib/aia/directive_processor.rb +14 -6
  41. data/lib/aia/directives/checkpoint.rb +283 -0
  42. data/lib/aia/directives/configuration.rb +27 -98
  43. data/lib/aia/directives/models.rb +15 -9
  44. data/lib/aia/directives/registry.rb +2 -0
  45. data/lib/aia/directives/utility.rb +25 -9
  46. data/lib/aia/directives/web_and_file.rb +50 -47
  47. data/lib/aia/logger.rb +328 -0
  48. data/lib/aia/prompt_handler.rb +18 -22
  49. data/lib/aia/ruby_llm_adapter.rb +584 -65
  50. data/lib/aia/session.rb +49 -156
  51. data/lib/aia/topic_context.rb +125 -0
  52. data/lib/aia/ui_presenter.rb +20 -16
  53. data/lib/aia/utility.rb +50 -18
  54. data/lib/aia.rb +91 -66
  55. data/lib/extensions/ruby_llm/modalities.rb +2 -0
  56. data/mcp_servers/apple-mcp.json +8 -0
  57. data/mcp_servers/mcp_server_chart.json +11 -0
  58. data/mcp_servers/playwright_one.json +8 -0
  59. data/mcp_servers/playwright_two.json +8 -0
  60. data/mcp_servers/tavily_mcp_server.json +8 -0
  61. metadata +85 -26
  62. data/lib/aia/config/base.rb +0 -288
  63. data/lib/aia/config/defaults.rb +0 -91
  64. data/lib/aia/config/file_loader.rb +0 -163
  65. data/lib/aia/context_manager.rb +0 -134
  66. data/mcp_servers/imcp.json +0 -7
  67. data/mcp_servers/launcher.json +0 -11
  68. data/mcp_servers/timeserver.json +0 -8
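
A recurring change across these files: flat config attributes have been regrouped into dot-notation sections. The mapping below is compiled from the hunks that follow (it covers only renames visible in this diff):

    AIA.config.out_file      # now AIA.config.output.file
    AIA.config.role          # now AIA.config.prompts.role
    AIA.config.show_metrics  # now AIA.config.flags.tokens
    AIA.config.show_cost     # now AIA.config.flags.cost
    AIA.config.tool_paths    # now AIA.config.tools.paths
    AIA.config.adapter       # now AIA.config.llm.adapter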
data/lib/aia/session.rb CHANGED
@@ -9,7 +9,6 @@ require "fileutils"
 require "amazing_print"
 require_relative "directive_processor"
 require_relative "history_manager"
-require_relative "context_manager"
 require_relative "ui_presenter"
 require_relative "chat_processor_service"
 require_relative "prompt_handler"
@@ -45,36 +44,18 @@ module AIA
     end
 
     def initialize_components
-      # For multi-model: create separate context manager per model (ADR-002 revised + ADR-005)
-      # For single-model: maintain backward compatibility with single context manager
-      if AIA.config.model.is_a?(Array) && AIA.config.model.size > 1
-        @context_managers = {}
-        AIA.config.model.each do |model_spec|
-          # Handle both old string format and new hash format (ADR-005)
-          internal_id = if model_spec.is_a?(Hash)
-                          model_spec[:internal_id]
-                        else
-                          model_spec
-                        end
-
-          @context_managers[internal_id] = ContextManager.new(
-            system_prompt: AIA.config.system_prompt
-          )
-        end
-        @context_manager = nil # Signal we're using per-model managers
-      else
-        @context_manager = ContextManager.new(system_prompt: AIA.config.system_prompt)
-        @context_managers = nil
-      end
-
+      # RubyLLM's Chat instances maintain conversation history internally
+      # via @messages array. No separate context manager needed.
+      # Checkpoint/restore directives access Chat.@messages directly via AIA.client.chats
       @ui_presenter = UIPresenter.new
       @directive_processor = DirectiveProcessor.new
      @chat_processor = ChatProcessorService.new(@ui_presenter, @directive_processor)
     end
 
     def setup_output_file
-      if AIA.config.out_file && !AIA.config.out_file.nil? && !AIA.append? && File.exist?(AIA.config.out_file)
-        File.open(AIA.config.out_file, "w") { } # Truncate the file
+      out_file = AIA.config.output.file
+      if out_file && !out_file.nil? && !AIA.append? && File.exist?(out_file)
+        File.open(out_file, "w") { } # Truncate the file
       end
     end
 
@@ -137,7 +118,7 @@ module AIA
     end
 
     def setup_prompt_processing(prompt_id)
-      role_id = AIA.config.role
+      role_id = AIA.config.prompts.role
 
       begin
        prompt = @prompt_handler.get_prompt(prompt_id, role_id)
@@ -225,23 +206,21 @@
     end
 
     # Send prompt to AI and handle the response
+    # RubyLLM's Chat automatically adds user messages and responses to its internal @messages
     def send_prompt_and_get_response(prompt_text)
-      # Add prompt to conversation context
-      @context_manager.add_to_context(role: "user", content: prompt_text)
-
-      # Process the prompt
+      # Process the prompt - RubyLLM Chat maintains conversation history internally
       @ui_presenter.display_thinking_animation
-      response = @chat_processor.process_prompt(@context_manager.get_context)
+      response_data = @chat_processor.process_prompt(prompt_text)
 
-      # Add AI response to context
-      @context_manager.add_to_context(role: "assistant", content: response)
+      # Handle response format (may include metrics)
+      content = response_data.is_a?(Hash) ? response_data[:content] : response_data
 
       # Output the response
-      @chat_processor.output_response(response)
+      @chat_processor.output_response(content)
 
       # Process any directives in the response
-      if @directive_processor.directive?(response)
-        directive_result = @directive_processor.process(response, @context_manager)
+      if @directive_processor.directive?(content)
+        directive_result = @directive_processor.process(content, nil)
         puts "\nDirective output: #{directive_result}" if directive_result && !directive_result.strip.empty?
       end
     end
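
Note on the hunk above: ChatProcessorService#process_prompt now returns either a plain String or a Hash, and every caller normalizes with the same one-line ternary. A minimal sketch of that contract; only the :content key is visible in this diff, so the :metrics shape below is a guess based on the metrics hunks further down:

    # Hypothetical illustration of the duck-typed return value:
    def extract_content(response_data)
      response_data.is_a?(Hash) ? response_data[:content] : response_data
    end

    extract_content("plain text")
    #=> "plain text"
    extract_content({ content: "hi", metrics: { input_tokens: 12, output_tokens: 34 } })
    #=> "hi"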
@@ -315,19 +294,16 @@
 
       return if context.empty?
 
-      # Add context files content to context
-      @context_manager.add_to_context(role: "user", content: context)
-
-      # Process the context
+      # Process the context - RubyLLM Chat maintains conversation history internally
       @ui_presenter.display_thinking_animation
-      response = @chat_processor.process_prompt(@context_manager.get_context)
+      response_data = @chat_processor.process_prompt(context)
 
-      # Add AI response to context
-      @context_manager.add_to_context(role: "assistant", content: response)
+      # Handle response format (may include metrics)
+      content = response_data.is_a?(Hash) ? response_data[:content] : response_data
 
       # Output the response
-      @chat_processor.output_response(response)
-      @chat_processor.speak(response)
+      @chat_processor.output_response(content)
+      @chat_processor.speak(content)
       @ui_presenter.display_separator
     end
 
@@ -347,14 +323,15 @@
       @chat_prompt.text = piped_input
       processed_input = @chat_prompt.to_s
 
-      @context_manager.add_to_context(role: "user", content: processed_input)
-
+      # Process the piped input - RubyLLM Chat maintains conversation history internally
       @ui_presenter.display_thinking_animation
-      response = @chat_processor.process_prompt(@context_manager.get_context)
+      response_data = @chat_processor.process_prompt(processed_input)
+
+      # Handle response format (may include metrics)
+      content = response_data.is_a?(Hash) ? response_data[:content] : response_data
 
-      @context_manager.add_to_context(role: "assistant", content: response)
-      @chat_processor.output_response(response)
-      @chat_processor.speak(response) if AIA.speak?
+      @chat_processor.output_response(content)
+      @chat_processor.speak(content) if AIA.speak?
       @ui_presenter.display_separator
 
       STDIN.reopen(original_stdin)
@@ -375,8 +352,8 @@
 
       break if follow_up_prompt.nil? || follow_up_prompt.strip.downcase == "exit" || follow_up_prompt.strip.empty?
 
-      if AIA.config.out_file
-        File.open(AIA.config.out_file, "a") do |file|
+      if AIA.config.output.file
+        File.open(AIA.config.output.file, "a") do |file|
           file.puts "\nYou: #{follow_up_prompt}"
         end
       end
@@ -389,29 +366,10 @@
       @chat_prompt.text = follow_up_prompt
       processed_prompt = @chat_prompt.to_s
 
-      # Handle per-model contexts (ADR-002 revised)
-      if @context_managers
-        # Multi-model: add user prompt to each model's context
-        @context_managers.each_value do |ctx_mgr|
-          ctx_mgr.add_to_context(role: "user", content: processed_prompt)
-        end
-
-        # Get per-model conversations
-        conversations = {}
-        @context_managers.each do |model_name, ctx_mgr|
-          conversations[model_name] = ctx_mgr.get_context
-        end
-
-        @ui_presenter.display_thinking_animation
-        response_data = @chat_processor.process_prompt(conversations)
-      else
-        # Single-model: use original logic
-        @context_manager.add_to_context(role: "user", content: processed_prompt)
-        conversation = @context_manager.get_context
-
-        @ui_presenter.display_thinking_animation
-        response_data = @chat_processor.process_prompt(conversation)
-      end
+      # Process the prompt - RubyLLM Chat maintains conversation history internally
+      # via @messages array. Each model's Chat instance tracks its own conversation.
+      @ui_presenter.display_thinking_animation
+      response_data = @chat_processor.process_prompt(processed_prompt)
 
       # Handle new response format with metrics
       if response_data.is_a?(Hash)
@@ -426,8 +384,8 @@
 
       @ui_presenter.display_ai_response(content)
 
-      # Display metrics if enabled and available (chat mode only)
-      if AIA.config.show_metrics
+      # Display token usage if enabled and available (chat mode only)
+      if AIA.config.flags.tokens
         if multi_metrics
           # Display metrics for each model in multi-model mode
           @ui_presenter.display_multi_model_metrics(multi_metrics)
@@ -437,21 +395,6 @@
         end
       end
 
-      # Add responses to context (ADR-002 revised)
-      if @context_managers
-        # Multi-model: parse combined response and add each model's response to its own context
-        parsed_responses = parse_multi_model_response(content)
-        parsed_responses.each do |model_name, model_response|
-          @context_managers[model_name]&.add_to_context(
-            role: "assistant",
-            content: model_response
-          )
-        end
-      else
-        # Single-model: add response to single context
-        @context_manager.add_to_context(role: "assistant", content: content)
-      end
-
       @chat_processor.speak(content)
 
       @ui_presenter.display_separator
@@ -459,71 +402,21 @@
     end
 
     def process_chat_directive(follow_up_prompt)
-      # For multi-model, use first context manager for directives (ADR-002 revised)
-      # TODO: Consider if directives should affect all contexts or just one
-      context_for_directive = @context_managers ? @context_managers.values.first : @context_manager
-      directive_output = @directive_processor.process(follow_up_prompt, context_for_directive)
-
-      return handle_clear_directive if follow_up_prompt.strip.start_with?("//clear")
-      return handle_checkpoint_directive(directive_output) if follow_up_prompt.strip.start_with?("//checkpoint")
-      return handle_restore_directive(directive_output) if follow_up_prompt.strip.start_with?("//restore")
-      return handle_empty_directive_output if directive_output.nil? || directive_output.strip.empty?
-
-      handle_successful_directive(follow_up_prompt, directive_output)
-    end
-
-    def handle_clear_directive
-      # Clear context manager(s) - ADR-002 revised
-      if @context_managers
-        # Multi-model: clear all context managers
-        @context_managers.each_value { |ctx_mgr| ctx_mgr.clear_context(keep_system_prompt: true) }
-      else
-        # Single-model: clear single context manager
-        @context_manager.clear_context(keep_system_prompt: true)
-      end
-
-      # Try clearing the client's context
-      if AIA.config.client && AIA.config.client.respond_to?(:clear_context)
-        begin
-          AIA.config.client.clear_context
-        rescue => e
-          STDERR.puts "Warning: Error clearing client context: #{e.message}"
-          # Continue anyway - the context manager has been cleared which is the main goal
-        end
+      # Directives now access RubyLLM's Chat.@messages directly via AIA.client
+      # The second parameter is no longer used by checkpoint/restore/clear/review
+      directive_output = @directive_processor.process(follow_up_prompt, nil)
+
+      # Checkpoint-related directives (clear, checkpoint, restore, review) handle
+      # everything internally via the Checkpoint module, which operates directly
+      # on RubyLLM's Chat.@messages - no additional handling needed here.
+      if follow_up_prompt.strip.start_with?("//clear", "//checkpoint", "//restore", "//review", "//context")
+        @ui_presenter.display_info(directive_output) unless directive_output.nil? || directive_output.strip.empty?
+        return nil
       end
 
-      # Note: We intentionally do NOT reinitialize the client here
-      # as that could cause termination if model initialization fails
-
-      @ui_presenter.display_info("Chat context cleared.")
-      nil
-    end
-
-    def handle_checkpoint_directive(directive_output)
-      @ui_presenter.display_info(directive_output)
-      nil
-    end
-
-    def handle_restore_directive(directive_output)
-      # If the restore was successful, we also need to refresh the client's context - ADR-002 revised
-      if directive_output.start_with?("Context restored")
-        # Clear the client's context without reinitializing the entire adapter
-        if AIA.config.client && AIA.config.client.respond_to?(:clear_context)
-          begin
-            AIA.config.client.clear_context
-          rescue => e
-            STDERR.puts "Warning: Error clearing client context after restore: #{e.message}"
-            # Continue anyway - the context manager has been restored which is the main goal
-          end
-        end
-
-        # Note: For multi-model, only the first context manager was used for restore
-        # This is a limitation of the current directive system
-        # TODO: Consider supporting restore for all context managers
-      end
+      return handle_empty_directive_output if directive_output.nil? || directive_output.strip.empty?
 
-      @ui_presenter.display_info(directive_output)
-      nil
+      handle_successful_directive(follow_up_prompt, directive_output)
     end
 
     def handle_empty_directive_output
@@ -579,7 +472,7 @@
 
     def cleanup_chat_prompt
       if @chat_prompt_id
-        puts "[DEBUG] Cleaning up chat prompt: #{@chat_prompt_id}" if AIA.debug?
+        logger.debug("Cleaning up chat prompt", chat_prompt_id: @chat_prompt_id)
         begin
           @chat_prompt.delete
           @chat_prompt_id = nil # Prevent repeated attempts if error occurs elsewhere
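
Reviewer note: the //clear, //checkpoint, //restore, //review, and //context directives are now handled by the new Checkpoint module (data/lib/aia/directives/checkpoint.rb, added in this release but not shown in this diff), which operates directly on RubyLLM's Chat messages. A minimal sketch of the idea, assuming only a Chat-like object exposing a mutable messages array; this is not the gem's actual implementation:

    class CheckpointSketch
      def initialize(chat)
        @chat = chat        # anything responding to #messages
        @checkpoints = {}   # checkpoint name => saved copy of the messages array
      end

      def checkpoint(name)
        @checkpoints[name] = @chat.messages.dup
        "Checkpoint '#{name}' saved (#{@checkpoints[name].size} messages)"
      end

      def restore(name)
        saved = @checkpoints[name] or return "No checkpoint named '#{name}'"
        @chat.messages.replace(saved)
        "Context restored from checkpoint '#{name}'"
      end
    end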
data/lib/aia/topic_context.rb ADDED
@@ -0,0 +1,125 @@
+# lib/aia/topic_context.rb
+# Just thinking about the problem ...
+# maybe a directive like //topic [topic]
+# sets manually (when present) or dynamically when not present
+# and //topics - will list current topics
+# thinking about the //checkpoint and //restore directives
+#
+module AIA
+  class TopicContext
+    attr_reader :context_size
+
+    # Initialize topic context manager
+    # @param context_size [Integer] max allowed bytes per topic
+    def initialize(context_size = 128_000)
+      @storage = Hash.new { |h, k| h[k] = [] } # auto-initialize empty array
+      @context_size = context_size
+      @total_chars = 0
+      @mutex = Mutex.new # ensure thread safety
+    end
+
+    # Store a request/response pair under the given topic (or auto-generate one)
+    # @param request [String]
+    # @param response [String]
+    # @param topic [String, nil]
+    # @return [String] topic name used
+    def store_conversation(request, response, topic = nil)
+      raise ArgumentError, "request and response must be strings" unless request.is_a?(String) && response.is_a?(String)
+
+      topic ||= generate_topic(request)
+      size = request.bytesize + response.bytesize
+
+      @mutex.synchronize do
+        # Add the new context
+        @storage[topic] << { request:, response:, size:, time: Time.now }
+
+        # Update the global total
+        @total_chars += size
+
+        # Trim old entries if we exceeded the per-topic limit
+        trim_topic(topic)
+      end
+
+      topic
+    end
+
+    # Return an array of contexts for the given topic
+    # @param topic [String]
+    # @return [Array<Hash>]
+    def get_conversation(topic)
+      @mutex.synchronize { @storage[topic] || [] }
+    end
+
+    # All topic names
+    # @return [Array<String>]
+    def topics
+      @mutex.synchronize { @storage.keys }
+    end
+
+    # Hash of topic => array_of_contexts
+    # @return [Hash<String, Array<Hash>>]
+    def all_conversations
+      @mutex.synchronize { @storage.dup }
+    end
+
+    # Total number of characters stored across all topics
+    # @return [Integer]
+    def total_chars
+      @mutex.synchronize { @total_chars }
+    end
+
+    # Empty the storage and reset counters
+    def clear
+      @mutex.synchronize do
+        @storage.clear
+        @total_chars = 0
+      end
+    end
+
+    # Get memory usage statistics for a topic
+    # @param topic [String]
+    # @return [Hash{Symbol => Integer}]
+    def topic_stats(topic)
+      @mutex.synchronize do
+        return {} unless @storage.key?(topic)
+
+        {
+          count: @storage[topic].length,
+          size: topic_total_size(topic),
+          avg_size: topic_total_size(topic).fdiv(@storage[topic].length),
+        }
+      end
+    end
+
+    private
+
+    # Topic extractor with better heuristic - uses first meaningful 3 words
+    # @param request [String]
+    # @return [String]
+    def generate_topic(request)
+      cleaned = request.downcase.gsub(/[^a-z0-9\s]/, "")
+      words = cleaned.split
+      return "general" if words.empty?
+
+      words.first(3).join("_")
+    end
+
+    # Remove oldest contexts from the topic until size <= @context_size
+    # @param topic [String]
+    def trim_topic(topic)
+      return unless @storage.key?(topic) && @storage[topic].size > 1
+
+      while topic_total_size(topic) > @context_size
+        removed = @storage[topic].shift # oldest context
+        @total_chars -= removed[:size] # adjust global counter
+      end
+    end
+
+    # Helper to compute the sum of sizes for a topic
+    # @param topic [String]
+    # @return [Integer]
+    def topic_total_size(topic)
+      @storage[topic].sum { |ctx| ctx[:size] }
+    end
+  end
+end
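
Usage sketch for the new TopicContext class (still exploratory, per its own header comments; the calls below use only the API defined in the file above):

    ctx = AIA::TopicContext.new(1_000)  # 1,000-byte cap per topic
    topic = ctx.store_conversation("How do Ruby mutexes work?", "A Mutex ...")
    topic                   #=> "how_do_ruby" (first three words of the request)
    ctx.topics              #=> ["how_do_ruby"]
    ctx.topic_stats(topic)  #=> { count: 1, size: ..., avg_size: ... }

    # Additional pairs stored under the same topic are trimmed oldest-first
    # once the topic's total bytesize exceeds the cap:
    ctx.store_conversation("Another question", "Another answer", topic)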
data/lib/aia/ui_presenter.rb CHANGED
@@ -26,8 +26,9 @@ module AIA
       puts "\nAI: "
       format_chat_response(response)
 
-      if AIA.config.out_file && !AIA.config.out_file.nil?
-        File.open(AIA.config.out_file, 'a') do |file|
+      out_file = AIA.config.output.file
+      if out_file && !out_file.nil?
+        File.open(out_file, 'a') do |file|
           file.puts "\nAI: "
           format_chat_response(response, file)
         end
@@ -125,7 +126,7 @@ module AIA
       output_lines << "═" * 55
       output_lines << "Model: #{metrics[:model_id]}"
 
-      if AIA.config.show_cost
+      if AIA.config.flags.cost
         output_lines.concat(format_metrics_with_cost(metrics))
       else
         output_lines.concat(format_metrics_basic(metrics))
@@ -137,20 +138,21 @@ module AIA
       output_lines.each { |line| puts line }
 
       # Also write to file if configured
-      if AIA.config.out_file && !AIA.config.out_file.nil?
-        File.open(AIA.config.out_file, 'a') do |file|
+      out_file = AIA.config.output.file
+      if out_file && !out_file.nil?
+        File.open(out_file, 'a') do |file|
           output_lines.each { |line| file.puts line }
         end
       end
     end
-
+
     def display_multi_model_metrics(metrics_list)
       return unless metrics_list && !metrics_list.empty?
 
       output_lines = []
 
       # Determine table width based on whether costs are shown
-      if AIA.config.show_cost
+      if AIA.config.flags.cost
         table_width = 80
       else
         table_width = 60
@@ -161,7 +163,7 @@ module AIA
       output_lines << "─" * table_width
 
       # Build header row
-      if AIA.config.show_cost
+      if AIA.config.flags.cost
         output_lines << sprintf("%-20s %10s %10s %10s %12s %10s",
                                 "Model", "Input", "Output", "Total", "Cost", "x1000")
         output_lines << "─" * table_width
@@ -177,15 +179,16 @@ module AIA
       total_cost = 0.0
 
       metrics_list.each do |metrics|
-        model_name = metrics[:model_id]
+        # Use display_name if available (includes role), otherwise fall back to model_id
+        model_name = metrics[:display_name] || metrics[:model_id]
         # Truncate model name if too long
-        model_name = model_name[0..17] + ".." if model_name.length > 19
-
+        model_name = model_name[0..17] + ".." if model_name.to_s.length > 19
+
         input_tokens = metrics[:input_tokens] || 0
         output_tokens = metrics[:output_tokens] || 0
         total_tokens = input_tokens + output_tokens
 
-        if AIA.config.show_cost
+        if AIA.config.flags.cost
           cost_data = calculate_cost(metrics)
           if cost_data[:available]
             cost_str = "$#{'%.5f' % cost_data[:total_cost]}"
@@ -211,7 +214,7 @@ module AIA
       output_lines << "─" * table_width
       total_tokens = total_input + total_output
 
-      if AIA.config.show_cost && total_cost > 0
+      if AIA.config.flags.cost && total_cost > 0
         cost_str = "$#{'%.5f' % total_cost}"
         x1000_str = "$#{'%.2f' % (total_cost * 1000)}"
         output_lines << sprintf("%-20s %10d %10d %10d %12s %10s",
@@ -227,13 +230,14 @@ module AIA
       output_lines.each { |line| puts line }
 
       # Also write to file if configured
-      if AIA.config.out_file && !AIA.config.out_file.nil?
-        File.open(AIA.config.out_file, 'a') do |file|
+      out_file = AIA.config.output.file
+      if out_file && !out_file.nil?
+        File.open(out_file, 'a') do |file|
           output_lines.each { |line| file.puts line }
         end
       end
     end
-
+
     private
 
     def display_metrics_basic(metrics)
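
Reviewer note on the metrics table: each row is a plain sprintf with a left-aligned 20-character model column and right-aligned numeric columns, matching the header row widths. An illustrative call with made-up values:

    sprintf("%-20s %10d %10d %10d %12s %10s",
            "gpt-4o", 1200, 350, 1550, "$0.01234", "$12.34")
    # "gpt-4o" is left-aligned in a 20-char field; the token counts are
    # right-aligned in 10-char fields, Cost in a 12-char field, and the
    # x1000 column in a 10-char field.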
data/lib/aia/utility.rb CHANGED
@@ -10,22 +10,49 @@ module AIA
     end
 
     def user_tools?
-      AIA.config&.tool_paths && !AIA.config.tool_paths.empty?
+      AIA.config&.tools&.paths && !AIA.config.tools.paths.empty?
     end
 
     def mcp_servers?
       AIA.config&.mcp_servers && !AIA.config.mcp_servers.empty?
     end
 
+    # Returns only successfully connected MCP server names
     def mcp_server_names
+      # Use connected_mcp_servers if available (populated during MCP setup)
+      connected = AIA.config&.connected_mcp_servers
+      return connected if connected && !connected.empty?
+
+      # Fallback to configured servers if connection status not yet known
       return [] unless mcp_servers?
       AIA.config.mcp_servers.map { |s| s[:name] || s["name"] }.compact
     end
 
+    # Returns true if there are any connected MCP servers
+    def connected_mcp_servers?
+      connected = AIA.config&.connected_mcp_servers
+      connected && !connected.empty?
+    end
+
+    # Returns list of failed MCP servers with their errors
+    def failed_mcp_servers
+      AIA.config&.failed_mcp_servers || []
+    end
+
     def supports_tools?
-      AIA.config&.client&.model&.supports_functions? || false
+      AIA.client&.model&.supports_functions? || false
     end
 
+    # Returns the last refresh date from models.json modification time
+    def models_last_refresh
+      aia_dir = AIA.config&.paths&.aia_dir
+      return nil if aia_dir.nil?
+
+      models_file = File.join(File.expand_path(aia_dir), 'models.json')
+      return nil unless File.exist?(models_file)
+
+      File.mtime(models_file).strftime('%Y-%m-%d %H:%M')
+    end
 
     # Displays the AIA robot ASCII art
     # Yes, its slightly frivolous but it does contain some
@@ -37,40 +64,45 @@ module AIA
 
       mcp_version = defined?(RubyLLM::MCP::VERSION) ? " MCP v" + RubyLLM::MCP::VERSION : ''
 
-      # Extract model names from config (handles hash format from ADR-005)
-      model_display = if AIA.config&.model
-                        models = AIA.config.model
-                        if models.is_a?(String)
-                          models
-                        elsif models.is_a?(Array)
-                          if models.first.is_a?(Hash)
-                            models.map { |spec| spec[:model] }.join(', ')
+      # Extract model names from config (handles ModelSpec objects or Hashes)
+      model_display = if AIA.config&.models && !AIA.config.models.empty?
+                        models = AIA.config.models
+                        models.map do |spec|
+                          if spec.is_a?(AIA::ModelSpec)
+                            spec.name
+                          elsif spec.is_a?(Hash)
+                            spec[:name] || spec['name'] || spec.to_s
                           else
-                            models.join(', ')
+                            spec.to_s
                           end
-                        else
-                          models.to_s
-                        end
+                        end.join(', ')
                       else
                         'unknown-model'
                       end
 
-      mcp_line = mcp_servers? ? "MCP: #{mcp_server_names.join(', ')}" : ''
+      # Build MCP line based on connection status
+      mcp_line = if !mcp_servers?
+                   '' # No MCP servers configured
+                 elsif connected_mcp_servers?
+                   "MCP: #{mcp_server_names.join(', ')}"
+                 else
+                   "MCP: (none connected)"
+                 end
 
       puts <<-ROBOT
 
          ,      ,
        (\\____/)  AI Assistant (v#{AIA::VERSION}) is Online
         (_oo_)    #{model_display}#{supports_tools? ? ' (supports tools)' : ''}
-         (O)      using #{AIA.config&.adapter || 'unknown-adapter'} (v#{RubyLLM::VERSION}#{mcp_version})
+         (O)      using #{AIA.config&.llm&.adapter || 'unknown-adapter'} (v#{RubyLLM::VERSION}#{mcp_version})
        __||__    \\) model db was last refreshed on
-      [/______\\]  /  #{AIA.config&.last_refresh || 'unknown'}
+      [/______\\]  /  #{models_last_refresh || 'unknown'}
      / \\__AI__/ \\/   #{user_tools? ? 'I will also use your tools' : (tools? ? 'You can share my tools' : 'I did not bring any tools')}
     /    /__\\        #{mcp_line}
    (\\   /____\\       #{user_tools? && tools? ? 'My Toolbox contains:' : ''}
       ROBOT
       if user_tools? && tools?
-        tool_names = AIA.config.respond_to?(:tool_names) ? AIA.config.tool_names : AIA.config.tools
+        tool_names = AIA.config.tool_names
        if tool_names && !tool_names.to_s.empty?
          puts WordWrapper::MinimumRaggedness.new(
            width,
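
Reviewer note: the banner's "model db was last refreshed on" line now reads the mtime of models.json instead of a stored last_refresh config value. A standalone check of the same logic (a sketch; the real method takes the directory from AIA.config.paths.aia_dir, and '~/.aia' below is only an assumed example path):

    def last_refresh_for(dir)
      models_file = File.join(File.expand_path(dir), 'models.json')
      return nil unless File.exist?(models_file)

      File.mtime(models_file).strftime('%Y-%m-%d %H:%M')
    end

    puts last_refresh_for('~/.aia') || 'unknown'  # the banner falls back to 'unknown'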