aia 0.9.7 → 0.9.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. checksums.yaml +4 -4
  2. data/.config/tocer/configuration.yml +2 -1
  3. data/.version +1 -1
  4. data/CHANGELOG.md +15 -1
  5. data/README.md +43 -0
  6. data/Rakefile +16 -8
  7. data/examples/directives/ask.rb +21 -0
  8. data/examples/tools/edit_file.rb +2 -0
  9. data/examples/tools/incomplete/calculator_tool.rb +70 -0
  10. data/examples/tools/incomplete/composite_analysis_tool.rb +89 -0
  11. data/examples/tools/incomplete/data_science_kit.rb +128 -0
  12. data/examples/tools/incomplete/database_query_tool.rb +100 -0
  13. data/examples/tools/incomplete/devops_toolkit.rb +112 -0
  14. data/examples/tools/incomplete/error_handling_tool.rb +109 -0
  15. data/examples/tools/incomplete/pdf_page_reader.rb +32 -0
  16. data/examples/tools/incomplete/secure_tool_template.rb +117 -0
  17. data/examples/tools/incomplete/weather_tool.rb +110 -0
  18. data/examples/tools/incomplete/workflow_manager_tool.rb +145 -0
  19. data/examples/tools/list_files.rb +2 -0
  20. data/examples/tools/mcp/README.md +1 -0
  21. data/examples/tools/mcp/github_mcp_server.rb +41 -0
  22. data/examples/tools/mcp/imcp.rb +15 -0
  23. data/examples/tools/read_file.rb +2 -0
  24. data/examples/tools/run_shell_command.rb +2 -0
  25. data/lib/aia/chat_processor_service.rb +3 -26
  26. data/lib/aia/config.rb +542 -414
  27. data/lib/aia/context_manager.rb +3 -8
  28. data/lib/aia/directive_processor.rb +24 -11
  29. data/lib/aia/ruby_llm_adapter.rb +78 -10
  30. data/lib/aia/session.rb +313 -215
  31. data/lib/aia/ui_presenter.rb +7 -5
  32. data/lib/aia/utility.rb +26 -6
  33. data/lib/aia.rb +5 -1
  34. metadata +32 -12
  35. data/lib/aia/shell_command_executor.rb +0 -109
data/lib/aia/config.rb CHANGED
@@ -36,6 +36,7 @@ module AIA
36
36
  system_prompt: '',
37
37
 
38
38
  # Tools
39
+ tools: '', # Comma-separated string of loaded tool names (set by adapter)
39
40
  allowed_tools: nil, # nil means all tools are allowed; otherwise an Array of Strings which are the tool names
40
41
  rejected_tools: nil, # nil means no tools are rejected
41
42
  tool_paths: [], # Strings - absolute and relative to tools
@@ -54,7 +55,6 @@ module AIA
54
55
  append: false, # Default to not append to existing out_file
55
56
 
56
57
  # workflow
57
- next: nil,
58
58
  pipeline: [],
59
59
 
60
60
  # PromptManager::Prompt Tailoring
@@ -124,123 +124,21 @@ module AIA
124
124
  remaining_args = config.remaining_args.dup
125
125
  config.remaining_args = nil
126
126
 
127
- # Check for STDIN content
128
- stdin_content = nil
129
- if !STDIN.tty? && !STDIN.closed?
130
- begin
131
- stdin_content = STDIN.read
132
- STDIN.reopen('/dev/tty') # Reopen STDIN for interactive use
133
- rescue => _
134
- # If we can't reopen, continue without error
135
- end
136
- end
137
-
138
- # Is first remaining argument a prompt ID?
139
- unless remaining_args.empty?
140
- maybe_id = remaining_args.first
141
- maybe_id_plus = File.join(config.prompts_dir, maybe_id + config.prompt_extname)
142
-
143
- if AIA.bad_file?(maybe_id) && AIA.good_file?(maybe_id_plus)
144
- config.prompt_id =remaining_args.shift
145
- end
146
- end
147
-
148
- # Store STDIN content for later processing in session.rb
149
- if stdin_content && !stdin_content.strip.empty?
150
- config.stdin_content = stdin_content
151
- end
152
-
153
- unless remaining_args.empty?
154
- bad_files = remaining_args.reject { |filename| AIA.good_file?(filename) }
155
- if bad_files.any?
156
- STDERR.puts "Error: The following files do not exist: #{bad_files.join(', ')}"
157
- exit 1
158
- end
159
-
160
- config.context_files ||= []
161
- config.context_files += remaining_args
162
- end
163
-
164
- # Check if the last context file is an executable prompt
165
- if config.executable_prompt &&
166
- config.context_files &&
167
- !config.context_files.empty?
168
- config.executable_prompt_file = config.context_files.pop
169
- end
170
-
171
- # TODO: Consider that if there is no prompt ID but there is an executable prompt
172
- # then maybe that is all that is needed.
173
-
174
-
175
- if config.prompt_id.nil? && !config.chat && !config.fuzzy
176
- STDERR.puts "Error: A prompt ID is required unless using --chat, --fuzzy, or providing context files. Use -h or --help for help."
177
- exit 1
178
- end
179
-
180
- unless config.role.empty?
181
- unless config.roles_prefix.empty?
182
- unless config.role.start_with?(config.roles_prefix)
183
- config.role.prepend "#{config.roles_prefix}/"
184
- end
185
- end
186
- end
187
-
188
- config.roles_dir ||= File.join(config.prompts_dir, config.roles_prefix)
189
-
190
- if config.prompt_id.nil? || config.prompt_id.empty?
191
- if !config.role.nil? || !config.role.empty?
192
- config.prompt_id = config.role
193
- config.role = ''
194
- end
195
- end
196
-
197
- if config.fuzzy && config.prompt_id.empty?
198
- # When fuzzy search is enabled but no prompt ID is provided,
199
- # set a special value to trigger fuzzy search without an initial query
200
- # SMELL: This feels like a cludge
201
- config.prompt_id = '__FUZZY_SEARCH__'
202
- end
203
-
204
- unless [TrueClass, FalseClass].include?(config.chat.class)
205
- if config.chat.nil? || config.chat.empty?
206
- config.chat = false
207
- else
208
- config.chat = true
209
- end
210
- end
211
-
212
- unless [TrueClass, FalseClass].include?(config.fuzzy.class)
213
- if config.fuzzy.nil? || config.fuzzy.empty?
214
- config.fuzzy = false
215
- else
216
- config.fuzzy = true
217
- end
218
- end
219
-
220
- and_exit = false
221
-
222
- if config.completion
223
- generate_completion_script(config.completion)
224
- and_exit = true
225
- end
226
-
227
- exit if and_exit
228
-
229
- # Only require a prompt_id if we're not in chat mode, not using fuzzy search, and no context files
230
- if !config.chat && !config.fuzzy && config.prompt_id.empty? && (!config.context_files || config.context_files.empty?)
231
- STDERR.puts "Error: A prompt ID is required unless using --chat, --fuzzy, or providing context files. Use -h or --help for help."
232
- exit 1
233
- end
234
-
235
- # If we're in chat mode with context files but no prompt_id, that's valid
236
- if config.chat && config.prompt_id.empty? && config.context_files && !config.context_files.empty?
237
- # This is a valid use case - no action needed
238
- end
239
-
240
- # Tailor the PromptManager::Prompt
241
- if config.parameter_regex
242
- PromptManager::Prompt.parameter_regex = Regexp.new(config.parameter_regex)
243
- end
127
+ stdin_content = process_stdin_content
128
+ config.stdin_content = stdin_content if stdin_content && !stdin_content.strip.empty?
129
+
130
+ process_prompt_id_from_args(config, remaining_args)
131
+ validate_and_set_context_files(config, remaining_args)
132
+ handle_executable_prompt(config)
133
+ validate_required_prompt_id(config)
134
+ process_role_configuration(config)
135
+ handle_fuzzy_search_prompt_id(config)
136
+ normalize_boolean_flags(config)
137
+ handle_completion_script(config)
138
+ validate_final_prompt_requirements(config)
139
+ configure_prompt_manager(config)
140
+ prepare_pipeline(config)
141
+ validate_pipeline_prompts(config)
244
142
 
245
143
  config
246
144
  end
@@ -269,19 +167,14 @@ module AIA
269
167
  def self.load_tools(config)
270
168
  return if config.tool_paths.empty?
271
169
 
272
- exit_on_error = false
170
+ require_all_tools(config)
273
171
 
274
- unless config.allowed_tools.nil?
275
- config.tool_paths.select! do |path|
276
- config.allowed_tools.any? { |allowed| path.include?(allowed) }
277
- end
278
- end
172
+ config
173
+ end
279
174
 
280
- unless config.rejected_tools.nil?
281
- config.tool_paths.reject! do |path|
282
- config.rejected_tools.any? { |rejected| path.include?(rejected) }
283
- end
284
- end
175
+
176
+ def self.require_all_tools(config)
177
+ exit_on_error = false
285
178
 
286
179
  config.tool_paths.each do |tool_path|
287
180
  begin
@@ -295,8 +188,6 @@ module AIA
295
188
  end
296
189
 
297
190
  exit(1) if exit_on_error
298
-
299
- config
300
191
  end
301
192
 
302
193
 
@@ -332,356 +223,337 @@ module AIA
332
223
  config = OpenStruct.new
333
224
 
334
225
  begin
335
- opt_parser = OptionParser.new do |opts|
336
- opts.banner = "Usage: aia [options] [PROMPT_ID] [CONTEXT_FILE]*\n" +
337
- " aia --chat [PROMPT_ID] [CONTEXT_FILE]*\n" +
338
- " aia --chat [CONTEXT_FILE]*"
339
-
340
- opts.on("--chat", "Begin a chat session with the LLM after the initial prompt response; will set --no-out_file so that the LLM response comes to STDOUT.") do
341
- config.chat = true
342
- puts "Debug: Setting chat mode to true" if config.debug
343
- end
226
+ opt_parser = create_option_parser(config)
227
+ opt_parser.parse!
228
+ rescue => e
229
+ STDERR.puts "ERROR: #{e.message}"
230
+ STDERR.puts " use --help for usage report"
231
+ exit 1
232
+ end
344
233
 
345
- opts.on("--adapter ADAPTER", "Interface that adapts AIA to the LLM") do |adapter|
346
- adapter.downcase!
347
- valid_adapters = %w[ ruby_llm ] # NOTE: Add additional adapters here when needed
348
- if valid_adapters.include? adapter
349
- config.adapter = adapter
350
- else
351
- STDERR.puts "ERROR: Invalid adapter #{adapter} must be one of these: #{valid_adapters.join(', ')}"
352
- exit 1
353
- end
354
- end
234
+ parse_remaining_arguments(opt_parser, config)
235
+ config
236
+ end
355
237
 
356
- opts.on('--available_models [QUERY]', 'List (then exit) available models that match the optional query - a comma separated list of AND components like: openai,mini') do |query|
238
+ def self.create_option_parser(config)
239
+ OptionParser.new do |opts|
240
+ setup_banner(opts)
241
+ setup_mode_options(opts, config)
242
+ setup_adapter_options(opts, config)
243
+ setup_model_options(opts, config)
244
+ setup_file_options(opts, config)
245
+ setup_prompt_options(opts, config)
246
+ setup_ai_parameters(opts, config)
247
+ setup_audio_image_options(opts, config)
248
+ setup_tool_options(opts, config)
249
+ setup_utility_options(opts, config)
250
+ end
251
+ end
357
252
 
358
- # SMELL: mostly duplications the code in the vailable_models directive
359
- # assumes that the adapter is for the ruby_llm gem
360
- # should this be moved to the Utilities class as a common method?
253
+ def self.setup_banner(opts)
254
+ opts.banner = "Usage: aia [options] [PROMPT_ID] [CONTEXT_FILE]*\n" +
255
+ " aia --chat [PROMPT_ID] [CONTEXT_FILE]*\n" +
256
+ " aia --chat [CONTEXT_FILE]*"
257
+ end
361
258
 
362
- if query.nil?
363
- query = []
364
- else
365
- query = query.split(',')
366
- end
259
+ def self.setup_mode_options(opts, config)
260
+ opts.on("--chat", "Begin a chat session with the LLM after processing all prompts in the pipeline.") do
261
+ config.chat = true
262
+ puts "Debug: Setting chat mode to true" if config.debug
263
+ end
367
264
 
368
- header = "\nAvailable LLMs"
369
- header += " for #{query.join(' and ')}" if query
265
+ opts.on("-f", "--fuzzy", "Use fuzzy matching for prompt search") do
266
+ unless system("which fzf > /dev/null 2>&1")
267
+ STDERR.puts "Error: 'fzf' is not installed. Please install 'fzf' to use the --fuzzy option."
268
+ exit 1
269
+ end
270
+ config.fuzzy = true
271
+ end
370
272
 
371
- puts header + ':'
372
- puts
273
+ opts.on("--terse", "Adds a special instruction to the prompt asking the AI to keep responses short and to the point") do
274
+ config.terse = true
275
+ end
276
+ end
373
277
 
374
- q1 = query.select{|q| q.include?('_to_')}.map{|q| ':'==q[0] ? q[1...] : q}
375
- q2 = query.reject{|q| q.include?('_to_')}
278
+ def self.setup_adapter_options(opts, config)
279
+ opts.on("--adapter ADAPTER", "Interface that adapts AIA to the LLM") do |adapter|
280
+ adapter.downcase!
281
+ valid_adapters = %w[ ruby_llm ] # NOTE: Add additional adapters here when needed
282
+ if valid_adapters.include? adapter
283
+ config.adapter = adapter
284
+ else
285
+ STDERR.puts "ERROR: Invalid adapter #{adapter} must be one of these: #{valid_adapters.join(', ')}"
286
+ exit 1
287
+ end
288
+ end
376
289
 
290
+ opts.on('--available_models [QUERY]', 'List (then exit) available models that match the optional query - a comma separated list of AND components like: openai,mini') do |query|
291
+ list_available_models(query)
292
+ end
293
+ end
377
294
 
378
- # query = nil
379
- counter = 0
295
+ def self.setup_model_options(opts, config)
296
+ opts.on("-m MODEL", "--model MODEL", "Name of the LLM model to use") do |model|
297
+ config.model = model
298
+ end
380
299
 
381
- RubyLLM.models.all.each do |llm|
382
- inputs = llm.modalities.input.join(',')
383
- outputs = llm.modalities.output.join(',')
384
- entry = "- #{llm.id} (#{llm.provider}) #{inputs} to #{outputs}"
300
+ opts.on("--sm", "--speech_model MODEL", "Speech model to use") do |model|
301
+ config.speech_model = model
302
+ end
385
303
 
386
- if query.nil? || query.empty?
387
- counter += 1
388
- puts entry
389
- next
390
- end
304
+ opts.on("--tm", "--transcription_model MODEL", "Transcription model to use") do |model|
305
+ config.transcription_model = model
306
+ end
307
+ end
391
308
 
392
- show_it = true
393
- q1.each{|q| show_it &&= llm.modalities.send("#{q}?")}
394
- q2.each{|q| show_it &&= entry.include?(q)}
309
+ def self.setup_file_options(opts, config)
310
+ opts.on("-c", "--config_file FILE", "Load config file") do |file|
311
+ load_config_file(file, config)
312
+ end
395
313
 
396
- if show_it
397
- counter += 1
398
- puts entry
399
- end
400
- end
314
+ opts.on("-o", "--[no-]out_file [FILE]", "Output file (default: temp.md)") do |file|
315
+ if file == false # --no-out_file was used
316
+ config.out_file = nil
317
+ elsif file.nil? # No argument provided
318
+ config.out_file = 'temp.md'
319
+ else # File name provided
320
+ config.out_file = File.expand_path(file, Dir.pwd)
321
+ end
322
+ end
401
323
 
402
- puts if counter > 0
403
- puts "#{counter} LLMs matching your query"
404
- puts
324
+ opts.on("-a", "--[no-]append", "Append to output file instead of overwriting") do |append|
325
+ config.append = append
326
+ end
405
327
 
406
- exit
407
- end
328
+ opts.on("-l", "--[no-]log_file [FILE]", "Log file") do |file|
329
+ config.log_file = file
330
+ end
408
331
 
409
- opts.on("-m MODEL", "--model MODEL", "Name of the LLM model to use") do |model|
410
- config.model = model
411
- end
332
+ opts.on("--md", "--[no-]markdown", "Format with Markdown") do |md|
333
+ config.markdown = md
334
+ end
335
+ end
412
336
 
413
- opts.on("-x", "--[no-]exec", "Used to designate an executable prompt file") do |value|
414
- config.executable_prompt = value
415
- end
337
+ def self.setup_prompt_options(opts, config)
338
+ opts.on("--prompts_dir DIR", "Directory containing prompt files") do |dir|
339
+ config.prompts_dir = dir
340
+ end
416
341
 
342
+ opts.on("--roles_prefix PREFIX", "Subdirectory name for role files (default: roles)") do |prefix|
343
+ config.roles_prefix = prefix
344
+ end
417
345
 
418
- opts.on("--terse", "Adds a special instruction to the prompt asking the AI to keep responses short and to the point") do
419
- config.terse = true
420
- end
346
+ opts.on("-r", "--role ROLE_ID", "Role ID to prepend to prompt") do |role|
347
+ config.role = role
348
+ end
421
349
 
422
- opts.on("-c", "--config_file FILE", "Load config file") do |file|
423
- if File.exist?(file)
424
- ext = File.extname(file).downcase
425
- content = File.read(file)
426
-
427
- # Process ERB if filename ends with .erb
428
- if file.end_with?('.erb')
429
- content = ERB.new(content).result
430
- file = file.chomp('.erb')
431
- File.write(file, content)
432
- end
433
-
434
- file_config = case ext
435
- when '.yml', '.yaml'
436
- YAML.safe_load(content, permitted_classes: [Symbol], symbolize_names: true)
437
- when '.toml'
438
- TomlRB.parse(content)
439
- else
440
- raise "Unsupported config file format: #{ext}"
441
- end
442
-
443
- file_config.each do |key, value|
444
- config[key.to_sym] = value
445
- end
446
- else
447
- raise "Config file not found: #{file}"
448
- end
449
- end
350
+ opts.on("-n", "--next PROMPT_ID", "Next prompt to process") do |next_prompt|
351
+ config.pipeline ||= []
352
+ config.pipeline << next_prompt
353
+ end
450
354
 
451
- opts.on("-p", "--prompts_dir DIR", "Directory containing prompt files") do |dir|
452
- config.prompts_dir = dir
453
- end
355
+ opts.on("-p PROMPTS", "--pipeline PROMPTS", "Pipeline of comma-seperated prompt IDs to process") do |pipeline|
356
+ config.pipeline ||= []
357
+ config.pipeline += pipeline.split(',').map(&:strip)
358
+ end
454
359
 
455
- opts.on("--roles_prefix PREFIX", "Subdirectory name for role files (default: roles)") do |prefix|
456
- config.roles_prefix = prefix
457
- end
360
+ opts.on("-x", "--[no-]exec", "Used to designate an executable prompt file") do |value|
361
+ config.executable_prompt = value
362
+ end
458
363
 
459
- opts.on("-r", "--role ROLE_ID", "Role ID to prepend to prompt") do |role|
460
- config.role = role
461
- end
364
+ opts.on("--system_prompt PROMPT_ID", "System prompt ID to use for chat sessions") do |prompt_id|
365
+ config.system_prompt = prompt_id
366
+ end
462
367
 
463
- opts.on("--refresh DAYS", Integer, "Refresh models database interval in days") do |days|
464
- config.refresh = days || 0
465
- end
368
+ opts.on('--regex pattern', 'Regex pattern to extract parameters from prompt text') do |pattern|
369
+ config.parameter_regex = pattern
370
+ end
371
+ end
466
372
 
467
- opts.on('--regex pattern', 'Regex pattern to extract parameters from prompt text') do |pattern|
468
- config.parameter_regex = pattern
469
- end
373
+ def self.setup_ai_parameters(opts, config)
374
+ opts.on("-t", "--temperature TEMP", Float, "Temperature for text generation") do |temp|
375
+ config.temperature = temp
376
+ end
470
377
 
471
- opts.on("-o", "--[no-]out_file [FILE]", "Output file (default: temp.md)") do |file|
472
- if file == false # --no-out_file was used
473
- config.out_file = nil
474
- elsif file.nil? # No argument provided
475
- config.out_file = 'temp.md'
476
- else # File name provided
477
- config.out_file = File.expand_path(file, Dir.pwd)
478
- end
479
- end
378
+ opts.on("--max_tokens TOKENS", Integer, "Maximum tokens for text generation") do |tokens|
379
+ config.max_tokens = tokens
380
+ end
480
381
 
481
- opts.on("-a", "--[no-]append", "Append to output file instead of overwriting") do |append|
482
- config.append = append
483
- end
382
+ opts.on("--top_p VALUE", Float, "Top-p sampling value") do |value|
383
+ config.top_p = value
384
+ end
484
385
 
485
- opts.on("-l", "--[no-]log_file [FILE]", "Log file") do |file|
486
- config.log_file = file
487
- end
386
+ opts.on("--frequency_penalty VALUE", Float, "Frequency penalty") do |value|
387
+ config.frequency_penalty = value
388
+ end
488
389
 
489
- opts.on("--md", "--[no-]markdown", "Format with Markdown") do |md|
490
- config.markdown = md
491
- end
390
+ opts.on("--presence_penalty VALUE", Float, "Presence penalty") do |value|
391
+ config.presence_penalty = value
392
+ end
393
+ end
492
394
 
493
- opts.on("-n", "--next PROMPT_ID", "Next prompt to process") do |next_prompt|
494
- config.next = next_prompt
495
- end
395
+ def self.setup_audio_image_options(opts, config)
396
+ opts.on("--speak", "Simple implementation. Uses the speech model to convert text to audio, then plays the audio. Fun with --chat. Supports configuration of speech model and voice.") do
397
+ config.speak = true
398
+ end
496
399
 
497
- opts.on("--pipeline PROMPTS", "Pipeline of prompts to process") do |pipeline|
498
- config.pipeline = pipeline.split(',')
499
- end
400
+ opts.on("--voice VOICE", "Voice to use for speech") do |voice|
401
+ config.voice = voice
402
+ end
500
403
 
501
- opts.on("-f", "--fuzzy", "Use fuzzy matching for prompt search") do
502
- unless system("which fzf > /dev/null 2>&1")
503
- STDERR.puts "Error: 'fzf' is not installed. Please install 'fzf' to use the --fuzzy option."
504
- exit 1
505
- end
506
- config.fuzzy = true
507
- end
404
+ opts.on("--is", "--image_size SIZE", "Image size for image generation") do |size|
405
+ config.image_size = size
406
+ end
508
407
 
509
- opts.on("-d", "--debug", "Enable debug output") do
510
- config.debug = $DEBUG_ME = true
511
- end
408
+ opts.on("--iq", "--image_quality QUALITY", "Image quality for image generation") do |quality|
409
+ config.image_quality = quality
410
+ end
512
411
 
513
- opts.on("--no-debug", "Disable debug output") do
514
- config.debug = $DEBUG_ME = false
515
- end
412
+ opts.on("--style", "--image_style STYLE", "Style for image generation") do |style|
413
+ config.image_style = style
414
+ end
415
+ end
516
416
 
517
- opts.on("-v", "--[no-]verbose", "Be verbose") do |value|
518
- config.verbose = value
519
- end
417
+ def self.setup_tool_options(opts, config)
418
+ opts.on("--rq LIBS", "--require LIBS", "Ruby libraries to require for Ruby directive") do |libs|
419
+ config.require_libs ||= []
420
+ config.require_libs += libs.split(',')
421
+ end
520
422
 
521
- opts.on("--speak", "Simple implementation. Uses the speech model to convert text to audio, then plays the audio. Fun with --chat. Supports configuration of speech model and voice.") do
522
- config.speak = true
523
- end
423
+ opts.on("--tools PATH_LIST", "Add a tool(s)") do |a_path_list|
424
+ process_tools_option(a_path_list, config)
425
+ end
524
426
 
525
- opts.on("--voice VOICE", "Voice to use for speech") do |voice|
526
- config.voice = voice
527
- end
427
+ opts.on("--at", "--allowed_tools TOOLS_LIST", "Allow only these tools to be used") do |tools_list|
428
+ process_allowed_tools_option(tools_list, config)
429
+ end
528
430
 
529
- opts.on("--sm", "--speech_model MODEL", "Speech model to use") do |model|
530
- config.speech_model = model
531
- end
431
+ opts.on("--rt", "--rejected_tools TOOLS_LIST", "Reject these tools") do |tools_list|
432
+ process_rejected_tools_option(tools_list, config)
433
+ end
434
+ end
532
435
 
533
- opts.on("--tm", "--transcription_model MODEL", "Transcription model to use") do |model|
534
- config.transcription_model = model
535
- end
436
+ def self.setup_utility_options(opts, config)
437
+ opts.on("-d", "--debug", "Enable debug output") do
438
+ config.debug = $DEBUG_ME = true
439
+ end
536
440
 
537
- opts.on("--is", "--image_size SIZE", "Image size for image generation") do |size|
538
- config.image_size = size
539
- end
441
+ opts.on("--no-debug", "Disable debug output") do
442
+ config.debug = $DEBUG_ME = false
443
+ end
540
444
 
541
- opts.on("--iq", "--image_quality QUALITY", "Image quality for image generation") do |quality|
542
- config.image_quality = quality
543
- end
445
+ opts.on("-v", "--[no-]verbose", "Be verbose") do |value|
446
+ config.verbose = value
447
+ end
544
448
 
545
- opts.on("--style", "--image_style STYLE", "Style for image generation") do |style|
546
- config.image_style = style
547
- end
449
+ opts.on("--refresh DAYS", Integer, "Refresh models database interval in days") do |days|
450
+ config.refresh = days || 0
451
+ end
548
452
 
549
- opts.on("--system_prompt PROMPT_ID", "System prompt ID to use for chat sessions") do |prompt_id|
550
- config.system_prompt = prompt_id
551
- end
453
+ opts.on("--dump FILE", "Dump config to file") do |file|
454
+ config.dump_file = file
455
+ end
552
456
 
553
- ###################################################
554
- # AI model parameters
555
- opts.on("-t", "--temperature TEMP", Float, "Temperature for text generation") do |temp|
556
- config.temperature = temp
557
- end
457
+ opts.on("--completion SHELL", "Show completion script for bash|zsh|fish - default is nil") do |shell|
458
+ config.completion = shell
459
+ end
558
460
 
559
- opts.on("--max_tokens TOKENS", Integer, "Maximum tokens for text generation") do |tokens|
560
- config.max_tokens = tokens
561
- end
461
+ opts.on("--version", "Show version") do
462
+ puts AIA::VERSION
463
+ exit
464
+ end
562
465
 
563
- opts.on("--top_p VALUE", Float, "Top-p sampling value") do |value|
564
- config.top_p = value
565
- end
466
+ opts.on("-h", "--help", "Prints this help") do
467
+ puts <<~HELP
566
468
 
567
- opts.on("--frequency_penalty VALUE", Float, "Frequency penalty") do |value|
568
- config.frequency_penalty = value
569
- end
469
+ AIA your AI Assistant
470
+ - designed for generative AI workflows,
471
+ - effortlessly manage AI prompts,
472
+ - integrate seamlessly with shell and embedded Ruby (ERB),
473
+ - run batch processes,
474
+ - engage in interactive chats,
475
+ - with user defined directives, tools and MCP clients.
570
476
 
571
- opts.on("--presence_penalty VALUE", Float, "Presence penalty") do |value|
572
- config.presence_penalty = value
573
- end
477
+ HELP
574
478
 
575
- opts.on("--dump FILE", "Dump config to file") do |file|
576
- config.dump_file = file
577
- end
479
+ puts opts
578
480
 
579
- opts.on("--completion SHELL", "Show completion script for bash|zsh|fish - default is nil") do |shell|
580
- config.completion = shell
581
- end
481
+ puts <<~EXTRA
582
482
 
583
- opts.on("--version", "Show version") do
584
- puts AIA::VERSION
585
- exit
586
- end
483
+ Explore Further:
484
+ - AIA Report an Issue: https://github.com/MadBomber/aia/issues
485
+ - AIA Documentation: https://github.com/madbomber/aia/blob/main/README.md
486
+ - AIA GitHub Repository: https://github.com/MadBomber/aia
487
+ - PromptManager Docs: https://github.com/MadBomber/prompt_manager/blob/main/README.md
488
+ - ERB Documentation: https://rubyapi.org/o/erb
489
+ - RubyLLM Tool Docs: https://rubyllm.com/guides/tools
490
+ - MCP Client Docs: https://github.com/patvice/ruby_llm-mcp/blob/main/README.md
587
491
 
588
- opts.on("-h", "--help", "Prints this help") do
589
- puts opts
590
- exit
591
- end
492
+ EXTRA
592
493
 
593
- opts.on("--rq LIBS", "--require LIBS", "Ruby libraries to require for Ruby directive") do |libs|
594
- config.require_libs ||= []
595
- config.require_libs += libs.split(',')
596
- end
494
+ exit
495
+ end
496
+ end
597
497
 
598
- opts.on("--tools PATH_LIST", "Add a tool(s)") do |a_path_list|
599
- config.tool_paths ||= []
498
+ def self.list_available_models(query)
499
+ # SMELL: mostly duplications the code in the vailable_models directive
500
+ # assumes that the adapter is for the ruby_llm gem
501
+ # should this be moved to the Utilities class as a common method?
600
502
 
601
- if a_path_list.empty?
602
- STDERR.puts "No list of paths for --tools option"
603
- exit 1
604
- else
605
- paths = a_path_list.split(',').map(&:strip).uniq
606
- end
503
+ if query.nil?
504
+ query = []
505
+ else
506
+ query = query.split(',')
507
+ end
607
508
 
608
- paths.each do |a_path|
609
- if File.exist?(a_path)
610
- if File.file?(a_path)
611
- if '.rb' == File.extname(a_path)
612
- config.tool_paths << a_path
613
- else
614
- STDERR.puts "file should have *.rb extension: #{a_path}"
615
- exit 1
616
- end
617
- elsif File.directory?(a_path)
618
- rb_files = Dir.glob(File.join(a_path, '**', '*.rb'))
619
- config.tool_paths += rb_files
620
- end
621
- else
622
- STDERR.puts "file/dir path is not valid: #{a_path}"
623
- exit 1
624
- end
625
- end
509
+ header = "\nAvailable LLMs"
510
+ header += " for #{query.join(' and ')}" if query
626
511
 
627
- config.tool_paths.uniq!
628
- end
512
+ puts header + ':'
513
+ puts
629
514
 
630
- opts.on("--at", "--allowed_tools TOOLS_LIST", "Allow only these tools to be used") do |tools_list|
631
- config.allowed_tools ||= []
632
- if tools_list.empty?
633
- STDERR.puts "No list of tool names provided for --allowed_tools option"
634
- exit 1
635
- else
636
- config.allowed_tools += tools_list.split(',').map(&:strip)
637
- config.allowed_tools.uniq!
638
- end
639
- end
515
+ q1 = query.select{|q| q.include?('_to_')}.map{|q| ':'==q[0] ? q[1...] : q}
516
+ q2 = query.reject{|q| q.include?('_to_')}
640
517
 
641
- opts.on("--rt", "--rejected_tools TOOLS_LIST", "Reject these tools") do |tools_list|
642
- config.rejected_tools ||= []
643
- if tools_list.empty?
644
- STDERR.puts "No list of tool names provided for --rejected_tools option"
645
- exit 1
646
- else
647
- config.rejected_tools += tools_list.split(',').map(&:strip)
648
- config.rejected_tools.uniq!
649
- end
650
- end
518
+ counter = 0
519
+
520
+ RubyLLM.models.all.each do |llm|
521
+ inputs = llm.modalities.input.join(',')
522
+ outputs = llm.modalities.output.join(',')
523
+ entry = "- #{llm.id} (#{llm.provider}) #{inputs} to #{outputs}"
524
+
525
+ if query.nil? || query.empty?
526
+ counter += 1
527
+ puts entry
528
+ next
651
529
  end
652
- opt_parser.parse!
653
- rescue => e
654
- STDERR.puts "ERROR: #{e.message}"
655
- STDERR.puts " use --help for usage report"
656
- exit 1
657
- end
658
530
 
659
- args = ARGV.dup
531
+ show_it = true
532
+ q1.each{|q| show_it &&= llm.modalities.send("#{q}?")}
533
+ q2.each{|q| show_it &&= entry.include?(q)}
660
534
 
661
- # Parse the command line arguments
662
- begin
663
- config.remaining_args = opt_parser.parse(args)
664
- rescue OptionParser::InvalidOption => e
665
- puts e.message
666
- puts opt_parser
667
- exit 1
535
+ if show_it
536
+ counter += 1
537
+ puts entry
538
+ end
668
539
  end
669
540
 
670
- config
671
- end
541
+ puts if counter > 0
542
+ puts "#{counter} LLMs matching your query"
543
+ puts
672
544
 
545
+ exit
546
+ end
673
547
 
674
- def self.cf_options(file)
675
- config = OpenStruct.new
676
-
548
+ def self.load_config_file(file, config)
677
549
  if File.exist?(file)
678
- ext = File.extname(file).downcase
550
+ ext = File.extname(file).downcase
679
551
  content = File.read(file)
680
552
 
681
553
  # Process ERB if filename ends with .erb
682
554
  if file.end_with?('.erb')
683
555
  content = ERB.new(content).result
684
- file = file.chomp('.erb')
556
+ file = file.chomp('.erb')
685
557
  File.write(file, content)
686
558
  end
687
559
 
@@ -695,21 +567,277 @@ module AIA
695
567
  end
696
568
 
697
569
  file_config.each do |key, value|
698
- config[key] = value
570
+ config[key.to_sym] = value
699
571
  end
700
572
  else
701
- STDERR.puts "WARNING:Config file not found: #{file}"
573
+ raise "Config file not found: #{file}"
574
+ end
575
+ end
576
+
577
+ def self.process_tools_option(a_path_list, config)
578
+ config.tool_paths ||= []
579
+
580
+ if a_path_list.empty?
581
+ STDERR.puts "No list of paths for --tools option"
582
+ exit 1
583
+ else
584
+ paths = a_path_list.split(',').map(&:strip).uniq
585
+ end
586
+
587
+ paths.each do |a_path|
588
+ if File.exist?(a_path)
589
+ if File.file?(a_path)
590
+ if '.rb' == File.extname(a_path)
591
+ config.tool_paths << a_path
592
+ else
593
+ STDERR.puts "file should have *.rb extension: #{a_path}"
594
+ exit 1
595
+ end
596
+ elsif File.directory?(a_path)
597
+ rb_files = Dir.glob(File.join(a_path, '*.rb'))
598
+ config.tool_paths += rb_files
599
+ end
600
+ else
601
+ STDERR.puts "file/dir path is not valid: #{a_path}"
602
+ exit 1
603
+ end
604
+ end
605
+
606
+ config.tool_paths.uniq!
607
+ end
608
+
609
+ def self.process_allowed_tools_option(tools_list, config)
610
+ config.allowed_tools ||= []
611
+ if tools_list.empty?
612
+ STDERR.puts "No list of tool names provided for --allowed_tools option"
613
+ exit 1
614
+ else
615
+ config.allowed_tools += tools_list.split(',').map(&:strip)
616
+ config.allowed_tools.uniq!
702
617
  end
618
+ end
619
+
620
+ def self.process_rejected_tools_option(tools_list, config)
621
+ config.rejected_tools ||= []
622
+ if tools_list.empty?
623
+ STDERR.puts "No list of tool names provided for --rejected_tools option"
624
+ exit 1
625
+ else
626
+ config.rejected_tools += tools_list.split(',').map(&:strip)
627
+ config.rejected_tools.uniq!
628
+ end
629
+ end
703
630
 
704
- if config.last_refresh
705
- if config.last_refresh.is_a? String
706
- config.last_refresh = Date.strptime(config.last_refresh, '%Y-%m-%d')
631
+ def self.process_stdin_content
632
+ stdin_content = ''
633
+
634
+ if !STDIN.tty? && !STDIN.closed?
635
+ begin
636
+ stdin_content << "\n" + STDIN.read
637
+ STDIN.reopen('/dev/tty') # Reopen STDIN for interactive use
638
+ rescue => _
639
+ # If we can't reopen, continue without error
707
640
  end
708
641
  end
709
642
 
643
+ stdin_content
644
+ end
645
+
646
+ def self.process_prompt_id_from_args(config, remaining_args)
647
+ return if remaining_args.empty?
648
+
649
+ maybe_id = remaining_args.first
650
+ maybe_id_plus = File.join(config.prompts_dir, maybe_id + config.prompt_extname)
651
+
652
+ if AIA.bad_file?(maybe_id) && AIA.good_file?(maybe_id_plus)
653
+ config.prompt_id = remaining_args.shift
654
+ end
655
+ end
656
+
657
+ def self.validate_and_set_context_files(config, remaining_args)
658
+ return if remaining_args.empty?
659
+
660
+ bad_files = remaining_args.reject { |filename| AIA.good_file?(filename) }
661
+ if bad_files.any?
662
+ STDERR.puts "Error: The following files do not exist: #{bad_files.join(', ')}"
663
+ exit 1
664
+ end
665
+
666
+ config.context_files ||= []
667
+ config.context_files += remaining_args
668
+ end
669
+
670
+ def self.handle_executable_prompt(config)
671
+ return unless config.executable_prompt && config.context_files && !config.context_files.empty?
672
+
673
+ config.executable_prompt_file = config.context_files.pop
674
+ end
675
+
676
+ def self.validate_required_prompt_id(config)
677
+ return unless config.prompt_id.nil? && !config.chat && !config.fuzzy
678
+
679
+ STDERR.puts "Error: A prompt ID is required unless using --chat, --fuzzy, or providing context files. Use -h or --help for help."
680
+ exit 1
681
+ end
682
+
683
+ def self.process_role_configuration(config)
684
+ return if config.role.empty?
685
+
686
+ unless config.roles_prefix.empty?
687
+ unless config.role.start_with?(config.roles_prefix)
688
+ config.role.prepend "#{config.roles_prefix}/"
689
+ end
690
+ end
691
+
692
+ config.roles_dir ||= File.join(config.prompts_dir, config.roles_prefix)
693
+
694
+ if config.prompt_id.nil? || config.prompt_id.empty?
695
+ if !config.role.nil? && !config.role.empty?
696
+ config.prompt_id = config.role
697
+ config.pipeline.prepend config.prompt_id
698
+ config.role = ''
699
+ end
700
+ end
701
+ end
702
+
703
+ def self.handle_fuzzy_search_prompt_id(config)
704
+ return unless config.fuzzy && config.prompt_id.empty?
705
+
706
+ # When fuzzy search is enabled but no prompt ID is provided,
707
+ # set a special value to trigger fuzzy search without an initial query
708
+ # SMELL: This feels like a cludge
709
+ config.prompt_id = '__FUZZY_SEARCH__'
710
+ end
711
+
712
+ def self.normalize_boolean_flags(config)
713
+ normalize_boolean_flag(config, :chat)
714
+ normalize_boolean_flag(config, :fuzzy)
715
+ end
716
+
717
+ def self.normalize_boolean_flag(config, flag)
718
+ return if [TrueClass, FalseClass].include?(config[flag].class)
719
+
720
+ config[flag] = if config[flag].nil? || config[flag].empty?
721
+ false
722
+ else
723
+ true
724
+ end
725
+ end
726
+
727
+ def self.handle_completion_script(config)
728
+ return unless config.completion
729
+
730
+ generate_completion_script(config.completion)
731
+ exit
732
+ end
733
+
734
+ def self.validate_final_prompt_requirements(config)
735
+ # Only require a prompt_id if we're not in chat mode, not using fuzzy search, and no context files
736
+ if !config.chat && !config.fuzzy && (config.prompt_id.nil? || config.prompt_id.empty?) && (!config.context_files || config.context_files.empty?)
737
+ STDERR.puts "Error: A prompt ID is required unless using --chat, --fuzzy, or providing context files. Use -h or --help for help."
738
+ exit 1
739
+ end
740
+
741
+ # If we're in chat mode with context files but no prompt_id, that's valid
742
+ # This is handled implicitly - no action needed
743
+ end
744
+
745
+ def self.configure_prompt_manager(config)
746
+ return unless config.parameter_regex
747
+
748
+ PromptManager::Prompt.parameter_regex = Regexp.new(config.parameter_regex)
749
+ end
750
+
751
+ def self.prepare_pipeline(config)
752
+ return if config.prompt_id.nil? || config.prompt_id.empty? || config.prompt_id == config.pipeline.first
753
+
754
+ config.pipeline.prepend config.prompt_id
755
+ end
756
+
757
+ def self.validate_pipeline_prompts(config)
758
+ return if config.pipeline.empty?
759
+
760
+ and_exit = false
761
+
762
+ config.pipeline.each do |prompt_id|
763
+ # Skip empty prompt IDs (can happen in chat-only mode)
764
+ next if prompt_id.nil? || prompt_id.empty?
765
+
766
+ prompt_file_path = File.join(config.prompts_dir, "#{prompt_id}.txt")
767
+ unless File.exist?(prompt_file_path)
768
+ STDERR.puts "Error: Prompt ID '#{prompt_id}' does not exist at #{prompt_file_path}"
769
+ and_exit = true
770
+ end
771
+ end
772
+
773
+ exit(1) if and_exit
774
+ end
775
+
776
+ def self.parse_remaining_arguments(opt_parser, config)
777
+ args = ARGV.dup
778
+
779
+ # Parse the command line arguments
780
+ begin
781
+ config.remaining_args = opt_parser.parse(args)
782
+ rescue OptionParser::InvalidOption => e
783
+ puts e.message
784
+ puts opt_parser
785
+ exit 1
786
+ end
787
+ end
788
+
789
+
790
+ def self.cf_options(file)
791
+ config = OpenStruct.new
792
+
793
+ if File.exist?(file)
794
+ content = read_and_process_config_file(file)
795
+ file_config = parse_config_content(content, File.extname(file).downcase)
796
+ apply_file_config_to_struct(config, file_config)
797
+ else
798
+ STDERR.puts "WARNING:Config file not found: #{file}"
799
+ end
800
+
801
+ normalize_last_refresh_date(config)
710
802
  config
711
803
  end
712
804
 
805
+ def self.read_and_process_config_file(file)
806
+ content = File.read(file)
807
+
808
+ # Process ERB if filename ends with .erb
809
+ if file.end_with?('.erb')
810
+ content = ERB.new(content).result
811
+ processed_file = file.chomp('.erb')
812
+ File.write(processed_file, content)
813
+ end
814
+
815
+ content
816
+ end
817
+
818
+ def self.parse_config_content(content, ext)
819
+ case ext
820
+ when '.yml', '.yaml'
821
+ YAML.safe_load(content, permitted_classes: [Symbol], symbolize_names: true)
822
+ when '.toml'
823
+ TomlRB.parse(content)
824
+ else
825
+ raise "Unsupported config file format: #{ext}"
826
+ end
827
+ end
828
+
829
+ def self.apply_file_config_to_struct(config, file_config)
830
+ file_config.each do |key, value|
831
+ config[key] = value
832
+ end
833
+ end
834
+
835
+ def self.normalize_last_refresh_date(config)
836
+ return unless config.last_refresh&.is_a?(String)
837
+
838
+ config.last_refresh = Date.strptime(config.last_refresh, '%Y-%m-%d')
839
+ end
840
+
713
841
 
714
842
  def self.generate_completion_script(shell)
715
843
  script_path = File.join(File.dirname(__FILE__), "aia_completion.#{shell}")