aia 0.9.8 → 0.9.9

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
data/lib/aia/config.rb CHANGED
@@ -36,6 +36,7 @@ module AIA
  system_prompt: '',

  # Tools
+ tools: '', # Comma-separated string of loaded tool names (set by adapter)
  allowed_tools: nil, # nil means all tools are allowed; otherwise an Array of Strings which are the tool names
  rejected_tools: nil, # nil means no tools are rejected
  tool_paths: [], # Strings - absolute and relative to tools
@@ -123,144 +124,21 @@ module AIA
  remaining_args = config.remaining_args.dup
  config.remaining_args = nil

- # Check for STDIN content
- stdin_content = ''
-
- if !STDIN.tty? && !STDIN.closed?
- begin
- stdin_content << "\n" + STDIN.read
- STDIN.reopen('/dev/tty') # Reopen STDIN for interactive use
- rescue => _
- # If we can't reopen, continue without error
- end
- end
-
- # Is first remaining argument a prompt ID?
- unless remaining_args.empty?
- maybe_id = remaining_args.first
- maybe_id_plus = File.join(config.prompts_dir, maybe_id + config.prompt_extname)
-
- if AIA.bad_file?(maybe_id) && AIA.good_file?(maybe_id_plus)
- config.prompt_id =remaining_args.shift
- end
- end
-
- # Store STDIN content for later processing in session.rb
- if stdin_content && !stdin_content.strip.empty?
- config.stdin_content = stdin_content
- end
-
- unless remaining_args.empty?
- bad_files = remaining_args.reject { |filename| AIA.good_file?(filename) }
- if bad_files.any?
- STDERR.puts "Error: The following files do not exist: #{bad_files.join(', ')}"
- exit 1
- end
-
- config.context_files ||= []
- config.context_files += remaining_args
- end
-
- # Check if the last context file is an executable prompt
- if config.executable_prompt &&
- config.context_files &&
- !config.context_files.empty?
- config.executable_prompt_file = config.context_files.pop
- end
-
- # TODO: Consider that if there is no prompt ID but there is an executable prompt
- # then maybe that is all that is needed.
-
-
- if config.prompt_id.nil? && !config.chat && !config.fuzzy
- STDERR.puts "Error: A prompt ID is required unless using --chat, --fuzzy, or providing context files. Use -h or --help for help."
- exit 1
- end
-
- unless config.role.empty?
- unless config.roles_prefix.empty?
- unless config.role.start_with?(config.roles_prefix)
- config.role.prepend "#{config.roles_prefix}/"
- end
- end
- end
-
- config.roles_dir ||= File.join(config.prompts_dir, config.roles_prefix)
-
- if config.prompt_id.nil? || config.prompt_id.empty?
- if !config.role.nil? || !config.role.empty?
- config.prompt_id = config.role
- config.pipeline.prepend config.prompt_id
- config.role = ''
- end
- end
-
- if config.fuzzy && config.prompt_id.empty?
- # When fuzzy search is enabled but no prompt ID is provided,
- # set a special value to trigger fuzzy search without an initial query
- # SMELL: This feels like a cludge
- config.prompt_id = '__FUZZY_SEARCH__'
- end
-
- unless [TrueClass, FalseClass].include?(config.chat.class)
- if config.chat.nil? || config.chat.empty?
- config.chat = false
- else
- config.chat = true
- end
- end
-
- unless [TrueClass, FalseClass].include?(config.fuzzy.class)
- if config.fuzzy.nil? || config.fuzzy.empty?
- config.fuzzy = false
- else
- config.fuzzy = true
- end
- end
-
- and_exit = false
-
- if config.completion
- generate_completion_script(config.completion)
- and_exit = true
- end
-
- exit if and_exit
-
- # Only require a prompt_id if we're not in chat mode, not using fuzzy search, and no context files
- if !config.chat && !config.fuzzy && config.prompt_id.empty? && (!config.context_files || config.context_files.empty?)
- STDERR.puts "Error: A prompt ID is required unless using --chat, --fuzzy, or providing context files. Use -h or --help for help."
- exit 1
- end
-
- # If we're in chat mode with context files but no prompt_id, that's valid
- if config.chat && config.prompt_id.empty? && config.context_files && !config.context_files.empty?
- # This is a valid use case - no action needed
- end
-
- # Tailor the PromptManager::Prompt
- if config.parameter_regex
- PromptManager::Prompt.parameter_regex = Regexp.new(config.parameter_regex)
- end
-
- if !config.prompt_id.empty? && config.prompt_id != config.pipeline.first
- config.pipeline.prepend config.prompt_id
- end
-
- unless config.pipeline.empty?
- config.pipeline.each do |prompt_id|
- # Skip empty prompt IDs (can happen in chat-only mode)
- next if prompt_id.nil? || prompt_id.empty?
-
- prompt_file_path = File.join(config.prompts_dir, "#{prompt_id}.txt")
- unless File.exist?(prompt_file_path)
- STDERR.puts "Error: Prompt ID '#{prompt_id}' does not exist at #{prompt_file_path}"
- and_exit = true
- end
- end
- end
-
- exit(1) if and_exit
+ stdin_content = process_stdin_content
+ config.stdin_content = stdin_content if stdin_content && !stdin_content.strip.empty?
+
+ process_prompt_id_from_args(config, remaining_args)
+ validate_and_set_context_files(config, remaining_args)
+ handle_executable_prompt(config)
+ validate_required_prompt_id(config)
+ process_role_configuration(config)
+ handle_fuzzy_search_prompt_id(config)
+ normalize_boolean_flags(config)
+ handle_completion_script(config)
+ validate_final_prompt_requirements(config)
+ configure_prompt_manager(config)
+ prepare_pipeline(config)
+ validate_pipeline_prompts(config)

  config
  end
@@ -289,19 +167,14 @@ module AIA
  def self.load_tools(config)
  return if config.tool_paths.empty?

- exit_on_error = false
+ require_all_tools(config)

- unless config.allowed_tools.nil?
- config.tool_paths.select! do |path|
- config.allowed_tools.any? { |allowed| path.include?(allowed) }
- end
- end
+ config
+ end

- unless config.rejected_tools.nil?
- config.tool_paths.reject! do |path|
- config.rejected_tools.any? { |rejected| path.include?(rejected) }
- end
- end
+
+ def self.require_all_tools(config)
+ exit_on_error = false

  config.tool_paths.each do |tool_path|
  begin
@@ -315,8 +188,6 @@ module AIA
  end

  exit(1) if exit_on_error
-
- config
  end


@@ -352,358 +223,337 @@ module AIA
  config = OpenStruct.new

  begin
- opt_parser = OptionParser.new do |opts|
- opts.banner = "Usage: aia [options] [PROMPT_ID] [CONTEXT_FILE]*\n" +
- " aia --chat [PROMPT_ID] [CONTEXT_FILE]*\n" +
- " aia --chat [CONTEXT_FILE]*"
-
- opts.on("--chat", "Begin a chat session with the LLM after processing all prompts in the pipeline.") do
- config.chat = true
- puts "Debug: Setting chat mode to true" if config.debug
- end
+ opt_parser = create_option_parser(config)
+ opt_parser.parse!
+ rescue => e
+ STDERR.puts "ERROR: #{e.message}"
+ STDERR.puts " use --help for usage report"
+ exit 1
+ end

- opts.on("--adapter ADAPTER", "Interface that adapts AIA to the LLM") do |adapter|
- adapter.downcase!
- valid_adapters = %w[ ruby_llm ] # NOTE: Add additional adapters here when needed
- if valid_adapters.include? adapter
- config.adapter = adapter
- else
- STDERR.puts "ERROR: Invalid adapter #{adapter} must be one of these: #{valid_adapters.join(', ')}"
- exit 1
- end
- end
+ parse_remaining_arguments(opt_parser, config)
+ config
+ end

- opts.on('--available_models [QUERY]', 'List (then exit) available models that match the optional query - a comma separated list of AND components like: openai,mini') do |query|
+ def self.create_option_parser(config)
+ OptionParser.new do |opts|
+ setup_banner(opts)
+ setup_mode_options(opts, config)
+ setup_adapter_options(opts, config)
+ setup_model_options(opts, config)
+ setup_file_options(opts, config)
+ setup_prompt_options(opts, config)
+ setup_ai_parameters(opts, config)
+ setup_audio_image_options(opts, config)
+ setup_tool_options(opts, config)
+ setup_utility_options(opts, config)
+ end
+ end

- # SMELL: mostly duplications the code in the vailable_models directive
- # assumes that the adapter is for the ruby_llm gem
- # should this be moved to the Utilities class as a common method?
+ def self.setup_banner(opts)
+ opts.banner = "Usage: aia [options] [PROMPT_ID] [CONTEXT_FILE]*\n" +
+ " aia --chat [PROMPT_ID] [CONTEXT_FILE]*\n" +
+ " aia --chat [CONTEXT_FILE]*"
+ end

- if query.nil?
- query = []
- else
- query = query.split(',')
- end
+ def self.setup_mode_options(opts, config)
+ opts.on("--chat", "Begin a chat session with the LLM after processing all prompts in the pipeline.") do
+ config.chat = true
+ puts "Debug: Setting chat mode to true" if config.debug
+ end

- header = "\nAvailable LLMs"
- header += " for #{query.join(' and ')}" if query
+ opts.on("-f", "--fuzzy", "Use fuzzy matching for prompt search") do
+ unless system("which fzf > /dev/null 2>&1")
+ STDERR.puts "Error: 'fzf' is not installed. Please install 'fzf' to use the --fuzzy option."
+ exit 1
+ end
+ config.fuzzy = true
+ end

- puts header + ':'
- puts
+ opts.on("--terse", "Adds a special instruction to the prompt asking the AI to keep responses short and to the point") do
+ config.terse = true
+ end
+ end

- q1 = query.select{|q| q.include?('_to_')}.map{|q| ':'==q[0] ? q[1...] : q}
- q2 = query.reject{|q| q.include?('_to_')}
+ def self.setup_adapter_options(opts, config)
+ opts.on("--adapter ADAPTER", "Interface that adapts AIA to the LLM") do |adapter|
+ adapter.downcase!
+ valid_adapters = %w[ ruby_llm ] # NOTE: Add additional adapters here when needed
+ if valid_adapters.include? adapter
+ config.adapter = adapter
+ else
+ STDERR.puts "ERROR: Invalid adapter #{adapter} must be one of these: #{valid_adapters.join(', ')}"
+ exit 1
+ end
+ end

+ opts.on('--available_models [QUERY]', 'List (then exit) available models that match the optional query - a comma separated list of AND components like: openai,mini') do |query|
+ list_available_models(query)
+ end
+ end

- # query = nil
- counter = 0
+ def self.setup_model_options(opts, config)
+ opts.on("-m MODEL", "--model MODEL", "Name of the LLM model to use") do |model|
+ config.model = model
+ end

- RubyLLM.models.all.each do |llm|
- inputs = llm.modalities.input.join(',')
- outputs = llm.modalities.output.join(',')
- entry = "- #{llm.id} (#{llm.provider}) #{inputs} to #{outputs}"
+ opts.on("--sm", "--speech_model MODEL", "Speech model to use") do |model|
+ config.speech_model = model
+ end

- if query.nil? || query.empty?
- counter += 1
- puts entry
- next
- end
+ opts.on("--tm", "--transcription_model MODEL", "Transcription model to use") do |model|
+ config.transcription_model = model
+ end
+ end

- show_it = true
- q1.each{|q| show_it &&= llm.modalities.send("#{q}?")}
- q2.each{|q| show_it &&= entry.include?(q)}
+ def self.setup_file_options(opts, config)
+ opts.on("-c", "--config_file FILE", "Load config file") do |file|
+ load_config_file(file, config)
+ end

- if show_it
- counter += 1
- puts entry
- end
- end
+ opts.on("-o", "--[no-]out_file [FILE]", "Output file (default: temp.md)") do |file|
+ if file == false # --no-out_file was used
+ config.out_file = nil
+ elsif file.nil? # No argument provided
+ config.out_file = 'temp.md'
+ else # File name provided
+ config.out_file = File.expand_path(file, Dir.pwd)
+ end
+ end

- puts if counter > 0
- puts "#{counter} LLMs matching your query"
- puts
+ opts.on("-a", "--[no-]append", "Append to output file instead of overwriting") do |append|
+ config.append = append
+ end

- exit
- end
+ opts.on("-l", "--[no-]log_file [FILE]", "Log file") do |file|
+ config.log_file = file
+ end

- opts.on("-m MODEL", "--model MODEL", "Name of the LLM model to use") do |model|
- config.model = model
- end
+ opts.on("--md", "--[no-]markdown", "Format with Markdown") do |md|
+ config.markdown = md
+ end
+ end

- opts.on("-x", "--[no-]exec", "Used to designate an executable prompt file") do |value|
- config.executable_prompt = value
- end
+ def self.setup_prompt_options(opts, config)
+ opts.on("--prompts_dir DIR", "Directory containing prompt files") do |dir|
+ config.prompts_dir = dir
+ end

+ opts.on("--roles_prefix PREFIX", "Subdirectory name for role files (default: roles)") do |prefix|
+ config.roles_prefix = prefix
+ end

- opts.on("--terse", "Adds a special instruction to the prompt asking the AI to keep responses short and to the point") do
- config.terse = true
- end
+ opts.on("-r", "--role ROLE_ID", "Role ID to prepend to prompt") do |role|
+ config.role = role
+ end

- opts.on("-c", "--config_file FILE", "Load config file") do |file|
- if File.exist?(file)
- ext = File.extname(file).downcase
- content = File.read(file)
-
- # Process ERB if filename ends with .erb
- if file.end_with?('.erb')
- content = ERB.new(content).result
- file = file.chomp('.erb')
- File.write(file, content)
- end
-
- file_config = case ext
- when '.yml', '.yaml'
- YAML.safe_load(content, permitted_classes: [Symbol], symbolize_names: true)
- when '.toml'
- TomlRB.parse(content)
- else
- raise "Unsupported config file format: #{ext}"
- end
-
- file_config.each do |key, value|
- config[key.to_sym] = value
- end
- else
- raise "Config file not found: #{file}"
- end
- end
+ opts.on("-n", "--next PROMPT_ID", "Next prompt to process") do |next_prompt|
+ config.pipeline ||= []
+ config.pipeline << next_prompt
+ end

- opts.on("--prompts_dir DIR", "Directory containing prompt files") do |dir|
- config.prompts_dir = dir
- end
+ opts.on("-p PROMPTS", "--pipeline PROMPTS", "Pipeline of comma-seperated prompt IDs to process") do |pipeline|
+ config.pipeline ||= []
+ config.pipeline += pipeline.split(',').map(&:strip)
+ end

- opts.on("--roles_prefix PREFIX", "Subdirectory name for role files (default: roles)") do |prefix|
- config.roles_prefix = prefix
- end
+ opts.on("-x", "--[no-]exec", "Used to designate an executable prompt file") do |value|
+ config.executable_prompt = value
+ end

- opts.on("-r", "--role ROLE_ID", "Role ID to prepend to prompt") do |role|
- config.role = role
- end
+ opts.on("--system_prompt PROMPT_ID", "System prompt ID to use for chat sessions") do |prompt_id|
+ config.system_prompt = prompt_id
+ end

- opts.on("--refresh DAYS", Integer, "Refresh models database interval in days") do |days|
- config.refresh = days || 0
- end
+ opts.on('--regex pattern', 'Regex pattern to extract parameters from prompt text') do |pattern|
+ config.parameter_regex = pattern
+ end
+ end

- opts.on('--regex pattern', 'Regex pattern to extract parameters from prompt text') do |pattern|
- config.parameter_regex = pattern
- end
+ def self.setup_ai_parameters(opts, config)
+ opts.on("-t", "--temperature TEMP", Float, "Temperature for text generation") do |temp|
+ config.temperature = temp
+ end

- opts.on("-o", "--[no-]out_file [FILE]", "Output file (default: temp.md)") do |file|
- if file == false # --no-out_file was used
- config.out_file = nil
- elsif file.nil? # No argument provided
- config.out_file = 'temp.md'
- else # File name provided
- config.out_file = File.expand_path(file, Dir.pwd)
- end
- end
+ opts.on("--max_tokens TOKENS", Integer, "Maximum tokens for text generation") do |tokens|
+ config.max_tokens = tokens
+ end

- opts.on("-a", "--[no-]append", "Append to output file instead of overwriting") do |append|
- config.append = append
- end
+ opts.on("--top_p VALUE", Float, "Top-p sampling value") do |value|
+ config.top_p = value
+ end

- opts.on("-l", "--[no-]log_file [FILE]", "Log file") do |file|
- config.log_file = file
- end
+ opts.on("--frequency_penalty VALUE", Float, "Frequency penalty") do |value|
+ config.frequency_penalty = value
+ end

- opts.on("--md", "--[no-]markdown", "Format with Markdown") do |md|
- config.markdown = md
- end
+ opts.on("--presence_penalty VALUE", Float, "Presence penalty") do |value|
+ config.presence_penalty = value
+ end
+ end

- opts.on("-n", "--next PROMPT_ID", "Next prompt to process") do |next_prompt|
- config.pipeline ||= []
- config.pipeline << next_prompt
- end
+ def self.setup_audio_image_options(opts, config)
+ opts.on("--speak", "Simple implementation. Uses the speech model to convert text to audio, then plays the audio. Fun with --chat. Supports configuration of speech model and voice.") do
+ config.speak = true
+ end

- opts.on("-p PROMPTS", "--pipeline PROMPTS", "Pipeline of comma-seperated prompt IDs to process") do |pipeline|
- config.pipeline ||= []
- config.pipeline += pipeline.split(',').map(&:strip)
- end
+ opts.on("--voice VOICE", "Voice to use for speech") do |voice|
+ config.voice = voice
+ end

- opts.on("-f", "--fuzzy", "Use fuzzy matching for prompt search") do
- unless system("which fzf > /dev/null 2>&1")
- STDERR.puts "Error: 'fzf' is not installed. Please install 'fzf' to use the --fuzzy option."
- exit 1
- end
- config.fuzzy = true
- end
+ opts.on("--is", "--image_size SIZE", "Image size for image generation") do |size|
+ config.image_size = size
+ end

- opts.on("-d", "--debug", "Enable debug output") do
- config.debug = $DEBUG_ME = true
- end
+ opts.on("--iq", "--image_quality QUALITY", "Image quality for image generation") do |quality|
+ config.image_quality = quality
+ end

- opts.on("--no-debug", "Disable debug output") do
- config.debug = $DEBUG_ME = false
- end
+ opts.on("--style", "--image_style STYLE", "Style for image generation") do |style|
+ config.image_style = style
+ end
+ end

- opts.on("-v", "--[no-]verbose", "Be verbose") do |value|
- config.verbose = value
- end
+ def self.setup_tool_options(opts, config)
+ opts.on("--rq LIBS", "--require LIBS", "Ruby libraries to require for Ruby directive") do |libs|
+ config.require_libs ||= []
+ config.require_libs += libs.split(',')
+ end

- opts.on("--speak", "Simple implementation. Uses the speech model to convert text to audio, then plays the audio. Fun with --chat. Supports configuration of speech model and voice.") do
- config.speak = true
- end
+ opts.on("--tools PATH_LIST", "Add a tool(s)") do |a_path_list|
+ process_tools_option(a_path_list, config)
+ end

- opts.on("--voice VOICE", "Voice to use for speech") do |voice|
- config.voice = voice
- end
+ opts.on("--at", "--allowed_tools TOOLS_LIST", "Allow only these tools to be used") do |tools_list|
+ process_allowed_tools_option(tools_list, config)
+ end

- opts.on("--sm", "--speech_model MODEL", "Speech model to use") do |model|
- config.speech_model = model
- end
+ opts.on("--rt", "--rejected_tools TOOLS_LIST", "Reject these tools") do |tools_list|
+ process_rejected_tools_option(tools_list, config)
+ end
+ end

- opts.on("--tm", "--transcription_model MODEL", "Transcription model to use") do |model|
- config.transcription_model = model
- end
+ def self.setup_utility_options(opts, config)
+ opts.on("-d", "--debug", "Enable debug output") do
+ config.debug = $DEBUG_ME = true
+ end

- opts.on("--is", "--image_size SIZE", "Image size for image generation") do |size|
- config.image_size = size
- end
+ opts.on("--no-debug", "Disable debug output") do
+ config.debug = $DEBUG_ME = false
+ end

- opts.on("--iq", "--image_quality QUALITY", "Image quality for image generation") do |quality|
- config.image_quality = quality
- end
+ opts.on("-v", "--[no-]verbose", "Be verbose") do |value|
+ config.verbose = value
+ end

- opts.on("--style", "--image_style STYLE", "Style for image generation") do |style|
- config.image_style = style
- end
+ opts.on("--refresh DAYS", Integer, "Refresh models database interval in days") do |days|
+ config.refresh = days || 0
+ end

- opts.on("--system_prompt PROMPT_ID", "System prompt ID to use for chat sessions") do |prompt_id|
- config.system_prompt = prompt_id
- end
+ opts.on("--dump FILE", "Dump config to file") do |file|
+ config.dump_file = file
+ end

- ###################################################
- # AI model parameters
- opts.on("-t", "--temperature TEMP", Float, "Temperature for text generation") do |temp|
- config.temperature = temp
- end
+ opts.on("--completion SHELL", "Show completion script for bash|zsh|fish - default is nil") do |shell|
+ config.completion = shell
+ end

- opts.on("--max_tokens TOKENS", Integer, "Maximum tokens for text generation") do |tokens|
- config.max_tokens = tokens
- end
+ opts.on("--version", "Show version") do
+ puts AIA::VERSION
+ exit
+ end

- opts.on("--top_p VALUE", Float, "Top-p sampling value") do |value|
- config.top_p = value
- end
+ opts.on("-h", "--help", "Prints this help") do
+ puts <<~HELP

- opts.on("--frequency_penalty VALUE", Float, "Frequency penalty") do |value|
- config.frequency_penalty = value
- end
+ AIA your AI Assistant
+ - designed for generative AI workflows,
+ - effortlessly manage AI prompts,
+ - integrate seamlessly with shell and embedded Ruby (ERB),
+ - run batch processes,
+ - engage in interactive chats,
+ - with user defined directives, tools and MCP clients.

- opts.on("--presence_penalty VALUE", Float, "Presence penalty") do |value|
- config.presence_penalty = value
- end
+ HELP

- opts.on("--dump FILE", "Dump config to file") do |file|
- config.dump_file = file
- end
+ puts opts

- opts.on("--completion SHELL", "Show completion script for bash|zsh|fish - default is nil") do |shell|
- config.completion = shell
- end
+ puts <<~EXTRA

- opts.on("--version", "Show version") do
- puts AIA::VERSION
- exit
- end
+ Explore Further:
+ - AIA Report an Issue: https://github.com/MadBomber/aia/issues
+ - AIA Documentation: https://github.com/madbomber/aia/blob/main/README.md
+ - AIA GitHub Repository: https://github.com/MadBomber/aia
+ - PromptManager Docs: https://github.com/MadBomber/prompt_manager/blob/main/README.md
+ - ERB Documentation: https://rubyapi.org/o/erb
+ - RubyLLM Tool Docs: https://rubyllm.com/guides/tools
+ - MCP Client Docs: https://github.com/patvice/ruby_llm-mcp/blob/main/README.md

- opts.on("-h", "--help", "Prints this help") do
- puts opts
- exit
- end
+ EXTRA

- opts.on("--rq LIBS", "--require LIBS", "Ruby libraries to require for Ruby directive") do |libs|
- config.require_libs ||= []
- config.require_libs += libs.split(',')
- end
+ exit
+ end
+ end

- opts.on("--tools PATH_LIST", "Add a tool(s)") do |a_path_list|
- config.tool_paths ||= []
+ def self.list_available_models(query)
+ # SMELL: mostly duplications the code in the vailable_models directive
+ # assumes that the adapter is for the ruby_llm gem
+ # should this be moved to the Utilities class as a common method?

- if a_path_list.empty?
- STDERR.puts "No list of paths for --tools option"
- exit 1
- else
- paths = a_path_list.split(',').map(&:strip).uniq
- end
+ if query.nil?
+ query = []
+ else
+ query = query.split(',')
+ end

- paths.each do |a_path|
- if File.exist?(a_path)
- if File.file?(a_path)
- if '.rb' == File.extname(a_path)
- config.tool_paths << a_path
- else
- STDERR.puts "file should have *.rb extension: #{a_path}"
- exit 1
- end
- elsif File.directory?(a_path)
- rb_files = Dir.glob(File.join(a_path, '**', '*.rb'))
- config.tool_paths += rb_files
- end
- else
- STDERR.puts "file/dir path is not valid: #{a_path}"
- exit 1
- end
- end
+ header = "\nAvailable LLMs"
+ header += " for #{query.join(' and ')}" if query

- config.tool_paths.uniq!
- end
+ puts header + ':'
+ puts

- opts.on("--at", "--allowed_tools TOOLS_LIST", "Allow only these tools to be used") do |tools_list|
- config.allowed_tools ||= []
- if tools_list.empty?
- STDERR.puts "No list of tool names provided for --allowed_tools option"
- exit 1
- else
- config.allowed_tools += tools_list.split(',').map(&:strip)
- config.allowed_tools.uniq!
- end
- end
+ q1 = query.select{|q| q.include?('_to_')}.map{|q| ':'==q[0] ? q[1...] : q}
+ q2 = query.reject{|q| q.include?('_to_')}

- opts.on("--rt", "--rejected_tools TOOLS_LIST", "Reject these tools") do |tools_list|
- config.rejected_tools ||= []
- if tools_list.empty?
- STDERR.puts "No list of tool names provided for --rejected_tools option"
- exit 1
- else
- config.rejected_tools += tools_list.split(',').map(&:strip)
- config.rejected_tools.uniq!
- end
- end
+ counter = 0
+
+ RubyLLM.models.all.each do |llm|
+ inputs = llm.modalities.input.join(',')
+ outputs = llm.modalities.output.join(',')
+ entry = "- #{llm.id} (#{llm.provider}) #{inputs} to #{outputs}"
+
+ if query.nil? || query.empty?
+ counter += 1
+ puts entry
+ next
  end
- opt_parser.parse!
- rescue => e
- STDERR.puts "ERROR: #{e.message}"
- STDERR.puts " use --help for usage report"
- exit 1
- end

- args = ARGV.dup
+ show_it = true
+ q1.each{|q| show_it &&= llm.modalities.send("#{q}?")}
+ q2.each{|q| show_it &&= entry.include?(q)}

- # Parse the command line arguments
- begin
- config.remaining_args = opt_parser.parse(args)
- rescue OptionParser::InvalidOption => e
- puts e.message
- puts opt_parser
- exit 1
+ if show_it
+ counter += 1
+ puts entry
+ end
  end

- config
- end
-
+ puts if counter > 0
+ puts "#{counter} LLMs matching your query"
+ puts

- def self.cf_options(file)
- config = OpenStruct.new
+ exit
+ end

+ def self.load_config_file(file, config)
  if File.exist?(file)
- ext = File.extname(file).downcase
+ ext = File.extname(file).downcase
  content = File.read(file)

  # Process ERB if filename ends with .erb
  if file.end_with?('.erb')
  content = ERB.new(content).result
- file = file.chomp('.erb')
+ file = file.chomp('.erb')
  File.write(file, content)
  end

@@ -717,21 +567,277 @@ module AIA
  end

  file_config.each do |key, value|
- config[key] = value
+ config[key.to_sym] = value
  end
  else
- STDERR.puts "WARNING:Config file not found: #{file}"
+ raise "Config file not found: #{file}"
+ end
+ end
+
+ def self.process_tools_option(a_path_list, config)
+ config.tool_paths ||= []
+
+ if a_path_list.empty?
+ STDERR.puts "No list of paths for --tools option"
+ exit 1
+ else
+ paths = a_path_list.split(',').map(&:strip).uniq
+ end
+
+ paths.each do |a_path|
+ if File.exist?(a_path)
+ if File.file?(a_path)
+ if '.rb' == File.extname(a_path)
+ config.tool_paths << a_path
+ else
+ STDERR.puts "file should have *.rb extension: #{a_path}"
+ exit 1
+ end
+ elsif File.directory?(a_path)
+ rb_files = Dir.glob(File.join(a_path, '*.rb'))
+ config.tool_paths += rb_files
+ end
+ else
+ STDERR.puts "file/dir path is not valid: #{a_path}"
+ exit 1
+ end
+ end
+
+ config.tool_paths.uniq!
+ end
+
+ def self.process_allowed_tools_option(tools_list, config)
+ config.allowed_tools ||= []
+ if tools_list.empty?
+ STDERR.puts "No list of tool names provided for --allowed_tools option"
+ exit 1
+ else
+ config.allowed_tools += tools_list.split(',').map(&:strip)
+ config.allowed_tools.uniq!
+ end
+ end
+
+ def self.process_rejected_tools_option(tools_list, config)
+ config.rejected_tools ||= []
+ if tools_list.empty?
+ STDERR.puts "No list of tool names provided for --rejected_tools option"
+ exit 1
+ else
+ config.rejected_tools += tools_list.split(',').map(&:strip)
+ config.rejected_tools.uniq!
+ end
+ end
+
+ def self.process_stdin_content
+ stdin_content = ''
+
+ if !STDIN.tty? && !STDIN.closed?
+ begin
+ stdin_content << "\n" + STDIN.read
+ STDIN.reopen('/dev/tty') # Reopen STDIN for interactive use
+ rescue => _
+ # If we can't reopen, continue without error
+ end
+ end
+
+ stdin_content
+ end
+
+ def self.process_prompt_id_from_args(config, remaining_args)
+ return if remaining_args.empty?
+
+ maybe_id = remaining_args.first
+ maybe_id_plus = File.join(config.prompts_dir, maybe_id + config.prompt_extname)
+
+ if AIA.bad_file?(maybe_id) && AIA.good_file?(maybe_id_plus)
+ config.prompt_id = remaining_args.shift
+ end
+ end
+
+ def self.validate_and_set_context_files(config, remaining_args)
+ return if remaining_args.empty?
+
+ bad_files = remaining_args.reject { |filename| AIA.good_file?(filename) }
+ if bad_files.any?
+ STDERR.puts "Error: The following files do not exist: #{bad_files.join(', ')}"
+ exit 1
+ end
+
+ config.context_files ||= []
+ config.context_files += remaining_args
+ end
+
+ def self.handle_executable_prompt(config)
+ return unless config.executable_prompt && config.context_files && !config.context_files.empty?
+
+ config.executable_prompt_file = config.context_files.pop
+ end
+
+ def self.validate_required_prompt_id(config)
+ return unless config.prompt_id.nil? && !config.chat && !config.fuzzy
+
+ STDERR.puts "Error: A prompt ID is required unless using --chat, --fuzzy, or providing context files. Use -h or --help for help."
+ exit 1
+ end
+
+ def self.process_role_configuration(config)
+ return if config.role.empty?
+
+ unless config.roles_prefix.empty?
+ unless config.role.start_with?(config.roles_prefix)
+ config.role.prepend "#{config.roles_prefix}/"
+ end
+ end
+
+ config.roles_dir ||= File.join(config.prompts_dir, config.roles_prefix)
+
+ if config.prompt_id.nil? || config.prompt_id.empty?
+ if !config.role.nil? && !config.role.empty?
+ config.prompt_id = config.role
+ config.pipeline.prepend config.prompt_id
+ config.role = ''
+ end
+ end
+ end
+
+ def self.handle_fuzzy_search_prompt_id(config)
+ return unless config.fuzzy && config.prompt_id.empty?
+
+ # When fuzzy search is enabled but no prompt ID is provided,
+ # set a special value to trigger fuzzy search without an initial query
+ # SMELL: This feels like a cludge
+ config.prompt_id = '__FUZZY_SEARCH__'
+ end
+
+ def self.normalize_boolean_flags(config)
+ normalize_boolean_flag(config, :chat)
+ normalize_boolean_flag(config, :fuzzy)
+ end
+
+ def self.normalize_boolean_flag(config, flag)
+ return if [TrueClass, FalseClass].include?(config[flag].class)
+
+ config[flag] = if config[flag].nil? || config[flag].empty?
+ false
+ else
+ true
+ end
+ end
+
+ def self.handle_completion_script(config)
+ return unless config.completion
+
+ generate_completion_script(config.completion)
+ exit
+ end
+
+ def self.validate_final_prompt_requirements(config)
+ # Only require a prompt_id if we're not in chat mode, not using fuzzy search, and no context files
+ if !config.chat && !config.fuzzy && (config.prompt_id.nil? || config.prompt_id.empty?) && (!config.context_files || config.context_files.empty?)
+ STDERR.puts "Error: A prompt ID is required unless using --chat, --fuzzy, or providing context files. Use -h or --help for help."
+ exit 1
  end

- if config.last_refresh
- if config.last_refresh.is_a? String
- config.last_refresh = Date.strptime(config.last_refresh, '%Y-%m-%d')
+ # If we're in chat mode with context files but no prompt_id, that's valid
+ # This is handled implicitly - no action needed
+ end
+
+ def self.configure_prompt_manager(config)
+ return unless config.parameter_regex
+
+ PromptManager::Prompt.parameter_regex = Regexp.new(config.parameter_regex)
+ end
+
+ def self.prepare_pipeline(config)
+ return if config.prompt_id.nil? || config.prompt_id.empty? || config.prompt_id == config.pipeline.first
+
+ config.pipeline.prepend config.prompt_id
+ end
+
+ def self.validate_pipeline_prompts(config)
+ return if config.pipeline.empty?
+
+ and_exit = false
+
+ config.pipeline.each do |prompt_id|
+ # Skip empty prompt IDs (can happen in chat-only mode)
+ next if prompt_id.nil? || prompt_id.empty?
+
+ prompt_file_path = File.join(config.prompts_dir, "#{prompt_id}.txt")
+ unless File.exist?(prompt_file_path)
+ STDERR.puts "Error: Prompt ID '#{prompt_id}' does not exist at #{prompt_file_path}"
+ and_exit = true
  end
  end

+ exit(1) if and_exit
+ end
+
+ def self.parse_remaining_arguments(opt_parser, config)
+ args = ARGV.dup
+
+ # Parse the command line arguments
+ begin
+ config.remaining_args = opt_parser.parse(args)
+ rescue OptionParser::InvalidOption => e
+ puts e.message
+ puts opt_parser
+ exit 1
+ end
+ end
+
+
+ def self.cf_options(file)
+ config = OpenStruct.new
+
+ if File.exist?(file)
+ content = read_and_process_config_file(file)
+ file_config = parse_config_content(content, File.extname(file).downcase)
+ apply_file_config_to_struct(config, file_config)
+ else
+ STDERR.puts "WARNING:Config file not found: #{file}"
+ end
+
+ normalize_last_refresh_date(config)
  config
  end

+ def self.read_and_process_config_file(file)
+ content = File.read(file)
+
+ # Process ERB if filename ends with .erb
+ if file.end_with?('.erb')
+ content = ERB.new(content).result
+ processed_file = file.chomp('.erb')
+ File.write(processed_file, content)
+ end
+
+ content
+ end
+
+ def self.parse_config_content(content, ext)
+ case ext
+ when '.yml', '.yaml'
+ YAML.safe_load(content, permitted_classes: [Symbol], symbolize_names: true)
+ when '.toml'
+ TomlRB.parse(content)
+ else
+ raise "Unsupported config file format: #{ext}"
+ end
+ end
+
+ def self.apply_file_config_to_struct(config, file_config)
+ file_config.each do |key, value|
+ config[key] = value
+ end
+ end
+
+ def self.normalize_last_refresh_date(config)
+ return unless config.last_refresh&.is_a?(String)
+
+ config.last_refresh = Date.strptime(config.last_refresh, '%Y-%m-%d')
+ end
+

  def self.generate_completion_script(shell)
  script_path = File.join(File.dirname(__FILE__), "aia_completion.#{shell}")