aia 0.9.24 → 0.10.2
This diff compares publicly released versions of the package as published to its public registry. It is provided for informational purposes only and reflects the changes between those package versions as they appear in the registry.
- checksums.yaml +4 -4
- data/.version +1 -1
- data/CHANGELOG.md +84 -3
- data/README.md +179 -59
- data/bin/aia +6 -0
- data/docs/cli-reference.md +145 -72
- data/docs/configuration.md +156 -19
- data/docs/examples/tools/index.md +2 -2
- data/docs/faq.md +11 -11
- data/docs/guides/available-models.md +11 -11
- data/docs/guides/basic-usage.md +18 -17
- data/docs/guides/chat.md +57 -11
- data/docs/guides/executable-prompts.md +15 -15
- data/docs/guides/first-prompt.md +2 -2
- data/docs/guides/getting-started.md +6 -6
- data/docs/guides/image-generation.md +24 -24
- data/docs/guides/local-models.md +2 -2
- data/docs/guides/models.md +96 -18
- data/docs/guides/tools.md +4 -4
- data/docs/installation.md +2 -2
- data/docs/prompt_management.md +11 -11
- data/docs/security.md +3 -3
- data/docs/workflows-and-pipelines.md +1 -1
- data/examples/README.md +6 -6
- data/examples/headlines +3 -3
- data/lib/aia/aia_completion.bash +2 -2
- data/lib/aia/aia_completion.fish +4 -4
- data/lib/aia/aia_completion.zsh +2 -2
- data/lib/aia/chat_processor_service.rb +31 -21
- data/lib/aia/config/cli_parser.rb +403 -403
- data/lib/aia/config/config_section.rb +87 -0
- data/lib/aia/config/defaults.yml +219 -0
- data/lib/aia/config/defaults_loader.rb +147 -0
- data/lib/aia/config/mcp_parser.rb +151 -0
- data/lib/aia/config/model_spec.rb +67 -0
- data/lib/aia/config/validator.rb +185 -136
- data/lib/aia/config.rb +336 -17
- data/lib/aia/directive_processor.rb +14 -6
- data/lib/aia/directives/configuration.rb +24 -10
- data/lib/aia/directives/models.rb +3 -4
- data/lib/aia/directives/utility.rb +3 -2
- data/lib/aia/directives/web_and_file.rb +50 -47
- data/lib/aia/logger.rb +328 -0
- data/lib/aia/prompt_handler.rb +18 -22
- data/lib/aia/ruby_llm_adapter.rb +572 -69
- data/lib/aia/session.rb +9 -8
- data/lib/aia/ui_presenter.rb +20 -16
- data/lib/aia/utility.rb +50 -18
- data/lib/aia.rb +91 -66
- data/lib/extensions/ruby_llm/modalities.rb +2 -0
- data/mcp_servers/apple-mcp.json +8 -0
- data/mcp_servers/mcp_server_chart.json +11 -0
- data/mcp_servers/playwright_one.json +8 -0
- data/mcp_servers/playwright_two.json +8 -0
- data/mcp_servers/tavily_mcp_server.json +8 -0
- metadata +83 -25
- data/lib/aia/config/base.rb +0 -308
- data/lib/aia/config/defaults.rb +0 -91
- data/lib/aia/config/file_loader.rb +0 -163
- data/mcp_servers/imcp.json +0 -7
- data/mcp_servers/launcher.json +0 -11
- data/mcp_servers/timeserver.json +0 -8
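
The bulk of this release is a rewrite of the configuration subsystem: `config/base.rb`, `config/defaults.rb`, and `config/file_loader.rb` are removed in favor of `config_section.rb`, `defaults.yml`, `defaults_loader.rb`, `mcp_parser.rb`, and `model_spec.rb`, and `cli_parser.rb` (diffed in full below) now returns a plain hash of overrides instead of mutating a config object. A minimal sketch of the new entry point, based only on the code shown in the diff; the sample flags and the printed hash are illustrative, not output from the gem:

```ruby
# Hypothetical invocation: aia --chat --tokens my_prompt context.txt
require 'aia'

overrides = AIA::CLIParser.parse   # parses ARGV; prints an error and exits on invalid options
# => { chat: true, tokens: true, remaining_args: ["my_prompt", "context.txt"] }
```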
data/lib/aia/config/cli_parser.rb
@@ -1,531 +1,531 @@
+# frozen_string_literal: true
+
 # lib/aia/config/cli_parser.rb
+#
+# Parses command-line arguments and returns a hash of overrides
+# for the Config class.

 require 'optparse'
-
+require_relative 'model_spec'

 module AIA
-  module
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        parse_remaining_arguments(opt_parser, config)
-        config
-      end
-
-      def create_option_parser(config)
-        OptionParser.new do |opts|
-          setup_banner(opts)
-          setup_mode_options(opts, config)
-          setup_adapter_options(opts, config)
-          setup_model_options(opts, config)
-          setup_file_options(opts, config)
-          setup_prompt_options(opts, config)
-          setup_ai_parameters(opts, config)
-          setup_audio_image_options(opts, config)
-          setup_tool_options(opts, config)
-          setup_utility_options(opts, config)
-        end
+  module CLIParser
+    class << self
+      # Parse CLI arguments and return a hash of overrides
+      #
+      # @return [Hash] configuration overrides from CLI
+      def parse
+        options = {}
+
+        begin
+          parser = create_option_parser(options)
+          parser.parse!
+        rescue OptionParser::InvalidOption, OptionParser::MissingArgument => e
+          STDERR.puts "ERROR: #{e.message}"
+          STDERR.puts " use --help for usage report"
+          exit 1
         end

-
-
-
-
+        # Store remaining args for prompt_id and context files
+        options[:remaining_args] = ARGV.dup
+
+        options
+      end
+
+      private
+
+      def create_option_parser(options)
+        OptionParser.new do |opts|
+          setup_banner(opts)
+          setup_mode_options(opts, options)
+          setup_adapter_options(opts, options)
+          setup_model_options(opts, options)
+          setup_file_options(opts, options)
+          setup_prompt_options(opts, options)
+          setup_ai_parameters(opts, options)
+          setup_audio_image_options(opts, options)
+          setup_tool_options(opts, options)
+          setup_utility_options(opts, options)
         end
+      end

-
-
-
-
-
+      def setup_banner(opts)
+        opts.banner = "Usage: aia [options] [PROMPT_ID] [CONTEXT_FILE]*\n" +
+                      " aia --chat [PROMPT_ID] [CONTEXT_FILE]*\n" +
+                      " aia --chat [CONTEXT_FILE]*"
+      end

-
-
-
-
-          end
-          config.fuzzy = true
-        end
+      def setup_mode_options(opts, options)
+        opts.on("--chat", "Begin a chat session with the LLM after processing all prompts in the pipeline.") do
+          options[:chat] = true
+        end

-
-
+        opts.on("-f", "--fuzzy", "Use fuzzy matching for prompt search") do
+          unless system("which fzf > /dev/null 2>&1")
+            STDERR.puts "Error: 'fzf' is not installed. Please install 'fzf' to use the --fuzzy option."
+            exit 1
           end
+          options[:fuzzy] = true
         end

-
-
-
-
-          if valid_adapters.include? adapter
-            config.adapter = adapter
-          else
-            STDERR.puts "ERROR: Invalid adapter #{adapter} must be one of these: #{valid_adapters.join(', ')}"
-            exit 1
-          end
-        end
+        opts.on("--terse", "Adds a special instruction to the prompt asking the AI to keep responses short and to the point") do
+          options[:terse] = true
+        end
+      end

-
-
+      def setup_adapter_options(opts, options)
+        opts.on("--adapter ADAPTER", "Interface that adapts AIA to the LLM") do |adapter|
+          adapter.downcase!
+          valid_adapters = %w[ruby_llm]
+          if valid_adapters.include?(adapter)
+            options[:adapter] = adapter
+          else
+            STDERR.puts "ERROR: Invalid adapter #{adapter} must be one of these: #{valid_adapters.join(', ')}"
+            exit 1
          end
         end

-
-
-
-
+        opts.on('--available-models [QUERY]', 'List (then exit) available models that match the optional query') do |query|
+          list_available_models(query)
+        end
+      end

-
-
-
+      def setup_model_options(opts, options)
+        opts.on("-m MODEL", "--model MODEL", "Name of the LLM model(s) to use. Format: MODEL[=ROLE][,MODEL[=ROLE]]...") do |model_string|
+          options[:models] = parse_models_with_roles(model_string)
+        end

-
-
-
-      end
+        opts.on("--[no-]consensus", "Enable/disable consensus mode for multi-model responses") do |consensus|
+          options[:consensus] = consensus
+        end

-
-
-
+        opts.on("--list-roles", "List available role files and exit") do
+          list_available_roles
+          exit 0
+        end

-
-
-      end
+        opts.on("--sm", "--speech-model MODEL", "Speech model to use") do |model|
+          options[:speech_model] = model
         end

-
-
-
-
+        opts.on("--tm", "--transcription-model MODEL", "Transcription model to use") do |model|
+          options[:transcription_model] = model
+        end
+      end

-
-
-
-
-            config.out_file = 'temp.md'
-          else # File name provided
-            config.out_file = File.expand_path(file, Dir.pwd)
-          end
-        end
+      def setup_file_options(opts, options)
+        opts.on("-c", "--config-file FILE", "Load additional config file") do |file|
+          options[:extra_config_file] = file
+        end

-
-
+        opts.on("-o", "--[no-]output [FILE]", "Output file (default: temp.md)") do |file|
+          if file == false
+            options[:output] = nil
+          elsif file.nil?
+            options[:output] = 'temp.md'
+          else
+            options[:output] = File.expand_path(file, Dir.pwd)
          end
+        end

-
-
-
+        opts.on("-a", "--[no-]append", "Append to output file instead of overwriting") do |append|
+          options[:append] = append
+        end

-
-
-      end
+        opts.on("--[no-]history-file [FILE]", "Conversation history file") do |file|
+          options[:history_file] = file
         end

-
-
-
-
+        opts.on("--md", "--[no-]markdown", "Format with Markdown") do |md|
+          options[:markdown] = md
+        end
+      end

-
-
-
+      def setup_prompt_options(opts, options)
+        opts.on("--prompts-dir DIR", "Directory containing prompt files") do |dir|
+          options[:prompts_dir] = dir
+        end

-
-
-
+        opts.on("--roles-prefix PREFIX", "Subdirectory name for role files (default: roles)") do |prefix|
+          options[:roles_prefix] = prefix
+        end

-
-
-
-      end
+        opts.on("-r", "--role ROLE_ID", "Role ID to prepend to prompt") do |role|
+          options[:role] = role
+        end

-
-
-
-
+        opts.on("-n", "--next PROMPT_ID", "Next prompt to process") do |next_prompt|
+          options[:pipeline] ||= []
+          options[:pipeline] << next_prompt
+        end

-
-
-
+        opts.on("-p PROMPTS", "--pipeline PROMPTS", "Pipeline of comma-separated prompt IDs to process") do |pipeline|
+          options[:pipeline] ||= []
+          options[:pipeline] += pipeline.split(',').map(&:strip)
+        end

-
-
-
+        opts.on("-x", "--[no-]exec", "Used to designate an executable prompt file") do |value|
+          options[:executable_prompt] = value
+        end

-
-
-      end
+        opts.on("--system-prompt PROMPT_ID", "System prompt ID to use for chat sessions") do |prompt_id|
+          options[:system_prompt] = prompt_id
         end

-
-
-
-
+        opts.on('--regex PATTERN', 'Regex pattern to extract parameters from prompt text') do |pattern|
+          options[:parameter_regex] = pattern
+        end
+      end

-
-
-
+      def setup_ai_parameters(opts, options)
+        opts.on("-t", "--temperature TEMP", Float, "Temperature for text generation") do |temp|
+          options[:temperature] = temp
+        end

-
-
-
+        opts.on("--max-tokens TOKENS", Integer, "Maximum tokens for text generation") do |tokens|
+          options[:max_tokens] = tokens
+        end

-
-
-
+        opts.on("--top-p VALUE", Float, "Top-p sampling value") do |value|
+          options[:top_p] = value
+        end

-
-
-      end
+        opts.on("--frequency-penalty VALUE", Float, "Frequency penalty") do |value|
+          options[:frequency_penalty] = value
         end

-
-
-
-
+        opts.on("--presence-penalty VALUE", Float, "Presence penalty") do |value|
+          options[:presence_penalty] = value
+        end
+      end

-
-
-
+      def setup_audio_image_options(opts, options)
+        opts.on("--speak", "Convert text to audio and play it") do
+          options[:speak] = true
+        end

-
-
-
+        opts.on("--voice VOICE", "Voice to use for speech") do |voice|
+          options[:voice] = voice
+        end

-
-
-
+        opts.on("--is", "--image-size SIZE", "Image size for image generation") do |size|
+          options[:image_size] = size
+        end

-
-
-      end
+        opts.on("--iq", "--image-quality QUALITY", "Image quality for image generation") do |quality|
+          options[:image_quality] = quality
         end

-
-
-
-
-      end
+        opts.on("--style", "--image-style STYLE", "Style for image generation") do |style|
+          options[:image_style] = style
+        end
+      end

-
-
-
+      def setup_tool_options(opts, options)
+        opts.on("--rq LIBS", "--require LIBS", "Ruby libraries to require for Ruby directive") do |libs|
+          options[:require_libs] ||= []
+          options[:require_libs] += libs.split(',')
+        end

-
-
-
+        opts.on("--tools PATH_LIST", "Add tool(s) by path") do |path_list|
+          options[:tool_paths] = process_tools_paths(path_list)
+        end

-
-
-
+        opts.on("--at", "--allowed-tools TOOLS_LIST", "Allow only these tools to be used") do |tools_list|
+          options[:allowed_tools] ||= []
+          options[:allowed_tools] += tools_list.split(',').map(&:strip)
         end
         end

-
-
-
-
+        opts.on("--rt", "--rejected-tools TOOLS_LIST", "Reject these tools") do |tools_list|
+          options[:rejected_tools] ||= []
+          options[:rejected_tools] += tools_list.split(',').map(&:strip)
+        end
+      end

-
-
-
+      def setup_utility_options(opts, options)
+        opts.on("-d", "--debug", "Enable debug output and set all loggers to DEBUG level") do
+          options[:debug] = true
+          options[:log_level_override] = 'debug'
+          $DEBUG_ME = true
+        end

-
-
-
+        opts.on("--no-debug", "Disable debug output") do
+          options[:debug] = false
+          $DEBUG_ME = false
+        end

-
-
-
+        opts.on("--info", "Set all loggers to INFO level") do
+          options[:log_level_override] = 'info'
+        end

-
-
-
+        opts.on("--warn", "Set all loggers to WARN level") do
+          options[:log_level_override] = 'warn'
+        end

-
-
-
+        opts.on("--error", "Set all loggers to ERROR level") do
+          options[:log_level_override] = 'error'
+        end

-
-
-
+        opts.on("--fatal", "Set all loggers to FATAL level") do
+          options[:log_level_override] = 'fatal'
+        end

-
-
-
-      end
+        opts.on("--log-to FILE", "Direct all loggers to FILE") do |file|
+          options[:log_file_override] = file
+        end

-
-
-
-      end
+        opts.on("-v", "--[no-]verbose", "Be verbose") do |value|
+          options[:verbose] = value
+        end

-
-
+        opts.on("--refresh DAYS", Integer, "Refresh models database interval in days") do |days|
+          options[:refresh] = days || 0
+        end

-
-
-
-            - integrate seamlessly with shell and embedded Ruby (ERB),
-            - run batch processes,
-            - engage in interactive chats,
-            - with user defined directives, tools and MCP clients.
+        opts.on("--dump FILE", "Dump config to file") do |file|
+          options[:dump_file] = file
+        end

-
+        opts.on("--completion SHELL", "Show completion script for bash|zsh|fish") do |shell|
+          options[:completion] = shell
+        end

-
+        opts.on("--tokens", "Display token usage in chat mode") do
+          options[:tokens] = true
+        end

-
+        opts.on("--cost", "Include cost calculations with token usage") do
+          options[:cost] = true
+          options[:tokens] = true # --cost implies --tokens
+        end

-
-
-
-
-            - PromptManager Docs: https://github.com/MadBomber/prompt_manager/blob/main/README.md
-            - ERB Documentation: https://rubyapi.org/o/erb
-            - RubyLLM Tool Docs: https://rubyllm.com/guides/tools
-            - MCP Client Docs: https://github.com/patvice/ruby_llm-mcp/blob/main/README.md
+        opts.on("--mcp FILE", "Load MCP server(s) from JSON file (can be used multiple times)") do |file|
+          options[:mcp_files] ||= []
+          options[:mcp_files] << file
+        end

-
+        opts.on("--no-mcp", "Disable all MCP server processing") do
+          options[:no_mcp] = true
+        end

-
-
+        opts.on("--version", "Show version") do
+          puts AIA::VERSION
+          exit
         end

-
-
-        model_counts = Hash.new(0)
+        opts.on("-h", "--help", "Prints this help") do
+          puts <<~HELP

-
-
+            AIA your AI Assistant
+            - designed for generative AI workflows,
+            - effortlessly manage AI prompts,
+            - integrate seamlessly with shell and embedded Ruby (ERB),
+            - run batch processes,
+            - engage in interactive chats,
+            - with user defined directives, tools and MCP clients.

-
-          if spec =~ /^=|=$/
-            raise ArgumentError, "Invalid model syntax: '#{spec}'. Expected format: MODEL[=ROLE]"
-          end
+          HELP

-
-          # Explicit role: "model=role" or "provider/model=role"
-          model_name, role_name = spec.split('=', 2)
-          model_name.strip!
-          role_name.strip!
-
-          # Validate role file exists (fail fast)
-          validate_role_exists(role_name)
-
-          # Track instance count for duplicates
-          model_counts[model_name] += 1
-          instance = model_counts[model_name]
-
-          models << {
-            model: model_name,
-            role: role_name,
-            instance: instance,
-            internal_id: instance > 1 ? "#{model_name}##{instance}" : model_name
-          }
-        else
-          # No explicit role, will use default from -r/--role
-          model_counts[spec] += 1
-          instance = model_counts[spec]
-
-          models << {
-            model: spec,
-            role: nil,
-            instance: instance,
-            internal_id: instance > 1 ? "#{spec}##{instance}" : spec
-          }
-        end
-      end
+          puts opts

-
+          puts <<~EXTRA
+
+            Explore Further:
+            - AIA Report an Issue: https://github.com/MadBomber/aia/issues
+            - AIA Documentation: https://github.com/MadBomber/aia/blob/main/README.md
+            - AIA GitHub Repository: https://github.com/MadBomber/aia
+            - PromptManager Docs: https://github.com/MadBomber/prompt_manager/blob/main/README.md
+            - ERB Documentation: https://rubyapi.org/o/erb
+            - RubyLLM Tool Docs: https://rubyllm.com/guides/tools
+            - MCP Client Docs: https://github.com/patvice/ruby_llm-mcp/blob/main/README.md
+
+          EXTRA
+
+          exit
         end
+      end

-
-
-
-
+      # Parse model string into array of ModelSpec-compatible hashes
+      #
+      # @param model_string [String] comma-separated models with optional roles
+      # @return [Array<Hash>] array of model specs
+      def parse_models_with_roles(model_string)
+        models = []
+        model_counts = Hash.new(0)

-
-
-
+        model_string.split(',').each do |spec|
+          spec.strip!
+
+          if spec =~ /^=|=$/
+            raise ArgumentError, "Invalid model syntax: '#{spec}'. Expected format: MODEL[=ROLE]"
          end

-
+          if spec.include?('=')
+            model_name, role_name = spec.split('=', 2)
+            model_name.strip!
+            role_name.strip!

-
-            available_roles = list_available_role_names(prompts_dir, roles_prefix)
+            validate_role_exists(role_name)

-
+            model_counts[model_name] += 1
+            instance = model_counts[model_name]

-
-
-
-
-
-
-
-
+            models << {
+              name: model_name,
+              role: role_name,
+              instance: instance,
+              internal_id: instance > 1 ? "#{model_name}##{instance}" : model_name
+            }
+          else
+            model_counts[spec] += 1
+            instance = model_counts[spec]

-
+            models << {
+              name: spec,
+              role: nil,
+              instance: instance,
+              internal_id: instance > 1 ? "#{spec}##{instance}" : spec
+            }
          end
         end

-
-
-        roles_prefix = ENV.fetch('AIA_ROLES_PREFIX', 'roles')
-        roles_dir = File.join(prompts_dir, roles_prefix)
+        models
+      end

-
-
+      def validate_role_exists(role_id)
+        prompts_dir = ENV.fetch('AIA_PROMPTS__DIR', File.join(ENV['HOME'], '.prompts'))
+        roles_prefix = ENV.fetch('AIA_PROMPTS__ROLES_PREFIX', 'roles')

-
-
-          puts "Create .txt files in this directory to define roles."
-        else
-          puts "Available roles in #{roles_dir}:"
-          roles.each { |role| puts " - #{role}" }
-        end
-      else
-        puts "No roles directory found at #{roles_dir}"
-        puts "Create this directory and add role files to use roles."
-      end
+        unless role_id.start_with?(roles_prefix)
+          role_id = "#{roles_prefix}/#{role_id}"
         end

-
-
-
+        role_file_path = File.join(prompts_dir, "#{role_id}.txt")
+
+        unless File.exist?(role_file_path)
+          available_roles = list_available_role_names(prompts_dir, roles_prefix)
+
+          error_msg = "Role file not found: #{role_file_path}\n\n"
+
+          if available_roles.empty?
+            error_msg += "No roles directory found at #{File.join(prompts_dir, roles_prefix)}\n"
+            error_msg += "Create the directory and add role files to use this feature."
+          else
+            error_msg += "Available roles:\n"
+            error_msg += available_roles.map { |r| " - #{r}" }.join("\n")
+            error_msg += "\n\nCreate the role file or use an existing role."
+          end

-
-        Dir.glob("**/*.txt", base: roles_dir)
-          .map { |f| f.chomp('.txt') }
-          .sort
+          raise ArgumentError, error_msg
         end
+      end

-
-
-
-
+      def list_available_roles
+        prompts_dir = ENV.fetch('AIA_PROMPTS__DIR', File.join(ENV['HOME'], '.prompts'))
+        roles_prefix = ENV.fetch('AIA_PROMPTS__ROLES_PREFIX', 'roles')
+        roles_dir = File.join(prompts_dir, roles_prefix)

-
-
+        if Dir.exist?(roles_dir)
+          roles = list_available_role_names(prompts_dir, roles_prefix)
+
+          if roles.empty?
+            puts "No role files found in #{roles_dir}"
+            puts "Create .txt files in this directory to define roles."
           else
-
+            puts "Available roles in #{roles_dir}:"
+            roles.each { |role| puts " - #{role}" }
           end
+        else
+          puts "No roles directory found at #{roles_dir}"
+          puts "Create this directory and add role files to use roles."
+        end
+      end

-
-
+      def list_available_role_names(prompts_dir, roles_prefix)
+        roles_dir = File.join(prompts_dir, roles_prefix)
+        return [] unless Dir.exist?(roles_dir)

-
-
+        Dir.glob("**/*.txt", base: roles_dir)
+          .map { |f| f.chomp('.txt') }
+          .sort
+      end

-
-
+      def list_available_models(query)
+        require 'ruby_llm'

-
+        if query.nil?
+          query = []
+        else
+          query = query.split(',')
+        end

-
-
-          outputs = llm.modalities.output.join(',')
-          entry = "- #{llm.id} (#{llm.provider}) #{inputs} to #{outputs}"
+        header = "\nAvailable LLMs"
+        header += " for #{query.join(' and ')}" if query.any?

-
-
-            puts entry
-            next
-          end
+        puts header + ':'
+        puts

-
-
-          q2.each{|q| show_it &&= entry.include?(q)}
+        q1 = query.select { |q| q.include?('_to_') }.map { |q| q[0] == ':' ? q[1..] : q }
+        q2 = query.reject { |q| q.include?('_to_') }

-
-            counter += 1
-            puts entry
-          end
-        end
+        counter = 0

-
-
-
+        RubyLLM.models.all.each do |llm|
+          inputs = llm.modalities.input.join(',')
+          outputs = llm.modalities.output.join(',')
+          entry = "- #{llm.id} (#{llm.provider}) #{inputs} to #{outputs}"

-
-
+          if query.nil? || query.empty?
+            counter += 1
+            puts entry
+            next
+          end

-
-
+          show_it = true
+          q1.each { |q| show_it &&= llm.modalities.send("#{q}?") }
+          q2.each { |q| show_it &&= entry.include?(q) }

-
-
-
-      rescue OptionParser::InvalidOption => e
-        puts e.message
-        puts opt_parser
-        exit 1
+          if show_it
+            counter += 1
+            puts entry
          end
         end

-
-
+        puts if counter > 0
+        puts "#{counter} LLMs matching your query"
+        puts

-
-
-          exit 1
-        else
-          paths = a_path_list.split(',').map(&:strip).uniq
-        end
+        exit
+      end

-
-
-          if File.file?(a_path)
-            if '.rb' == File.extname(a_path)
-              config.tool_paths << a_path
-            else
-              STDERR.puts "file should have *.rb extension: #{a_path}"
-              exit 1
-            end
-          elsif File.directory?(a_path)
-            rb_files = Dir.glob(File.join(a_path, '*.rb'))
-            config.tool_paths += rb_files
-          end
-        else
-          STDERR.puts "file/dir path is not valid: #{a_path}"
-          exit 1
-        end
-      end
+      def process_tools_paths(path_list)
+        paths = []

-
+        if path_list.empty?
+          STDERR.puts "No list of paths for --tools option"
+          exit 1
         end

-
-
-
-
-
+        path_list.split(',').map(&:strip).uniq.each do |a_path|
+          if File.exist?(a_path)
+            if File.file?(a_path)
+              if '.rb' == File.extname(a_path)
+                paths << a_path
+              else
+                STDERR.puts "file should have *.rb extension: #{a_path}"
+                exit 1
+              end
+            elsif File.directory?(a_path)
+              rb_files = Dir.glob(File.join(a_path, '*.rb'))
+              paths += rb_files
+            end
           else
-
-        config.allowed_tools.uniq!
-      end
-    end
-
-    def process_rejected_tools_option(tools_list, config)
-      config.rejected_tools ||= []
-      if tools_list.empty?
-        STDERR.puts "No list of tool names provided for --rejected_tools option"
+            STDERR.puts "file/dir path is not valid: #{a_path}"
             exit 1
-      else
-        config.rejected_tools += tools_list.split(',').map(&:strip)
-        config.rejected_tools.uniq!
           end
         end
+
+        paths.uniq
       end
     end
   end