aia 0.9.3rc1 → 0.9.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.version +1 -1
- data/CHANGELOG.md +7 -2
- data/README.md +1 -1
- data/lib/aia/chat_processor_service.rb +23 -40
- data/lib/aia/config.rb +20 -8
- data/lib/aia/context_manager.rb +6 -1
- data/lib/aia/directive_processor.rb +41 -1
- data/lib/aia/ruby_llm_adapter.rb +63 -47
- data/lib/aia/session.rb +4 -10
- data/lib/aia.rb +8 -1
- data/lib/extensions/ruby_llm/modalities.rb +26 -0
- data/lib/refinements/string.rb +16 -0
- metadata +5 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 9a4629811acd04507b4ca4f6902281dae7a3d35128af2a52b9278cf040fb85a4
+  data.tar.gz: 8d80eb38222142a366266091aae2863c2669f676fb9f362210abf12fb270c161
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 5997271297a683a7664aab0b74612817fdf53b4d206e9cbc4d50b3cb8a7dc798922b3b60f7f762363723d6b77ab282309f10b6fbc25d110ac5bd4fb9a9435faf
+  data.tar.gz: cea1ffbd60baa36963339972b91557dc2fced33ee17e451513de13df3b0d582f3dceda3f6fb611b6f83cbd6b9d9069480cd92ef78507e75dcfa55c223d060889
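Note: these sums cover the metadata.gz and data.tar.gz entries packed inside the .gem archive, which is a plain tar file. A minimal verification sketch, assuming a downloaded aia-0.9.4.gem in the current directory (the filename and the TarReader walk are assumptions, not part of this diff):

    require 'digest'
    require 'rubygems/package'

    # Hash the two entries that checksums.yaml records and compare the
    # output against the SHA256 values shown above.
    File.open('aia-0.9.4.gem', 'rb') do |gem_io|
      tar = Gem::Package::TarReader.new(gem_io)
      tar.each do |entry|
        next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)
        puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
      end
    end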
data/.version
CHANGED
@@ -1 +1 @@
-0.9.3rc1
+0.9.4
data/CHANGELOG.md
CHANGED
@@ -1,10 +1,15 @@
 # Changelog
 ## [Unreleased]
-
+
+## Released
+### [0.9.4] 2025-06-03
+- using RubyLLM v1.3.0
+- setting up a docs infrastructure to behave like the ruby_llm gem's guides site
+- fixed bug in the text-to-image workflow
+- discovered that ruby_llm does not have high level support for audio modes
 - need to pay attention to the test suite
 - also need to ensure the non text2text modes are working
 
-## Released
 ### [0.9.3rc1] 2025-05-24
 - using ruby_llm v1.3.0rc1
 - added a models database refresh based on integer days interval with the --refresh option
data/README.md
CHANGED
@@ -107,7 +107,7 @@ The following table provides a comprehensive list of configuration options, their
 | presence_penalty | --presence_penalty | 0.0 | AIA_PRESENCE_PENALTY |
 | prompt_extname | | .txt | AIA_PROMPT_EXTNAME |
 | prompts_dir | -p, --prompts_dir | ~/.prompts | AIA_PROMPTS_DIR |
-| refresh | --refresh |
+| refresh | --refresh | 7 (days) | AIA_REFRESH |
 | require_libs | --rq --require | [] | AIA_REQUIRE_LIBS |
 | role | -r, --role | | AIA_ROLE |
 | roles_dir | | ~/.prompts/roles | AIA_ROLES_DIR |
data/lib/aia/chat_processor_service.rb
CHANGED
@@ -12,7 +12,6 @@ module AIA
     end
 
 
-
     def speak(text)
       return unless AIA.speak?
 
@@ -26,11 +25,10 @@ module AIA
     end
 
 
-
-    def process_prompt(prompt, operation_type)
+    def process_prompt(prompt)
       result = nil
-      @ui_presenter.with_spinner("Processing",
-        result = send_to_client(prompt
+      @ui_presenter.with_spinner("Processing", determine_operation_type) do
+        result = send_to_client(prompt)
       end
 
       unless result.is_a? String
@@ -41,42 +39,20 @@ module AIA
     end
 
 
-
-
+    # conversation is an Array of Hashes. Each entry is an interchange
+    # with the LLM.
+    def send_to_client(conversation)
       maybe_change_model
 
-
-      when :text_to_text
-        AIA.client.chat(prompt)
-      when :text_to_image
-        AIA.client.chat(prompt)
-      when :image_to_text
-        AIA.client.chat(prompt)
-      when :text_to_audio
-        AIA.client.chat(prompt)
-      when :audio_to_text
-        if prompt.strip.end_with?('.mp3', '.wav', '.m4a', '.flac') && File.exist?(prompt.strip)
-          AIA.client.transcribe(prompt.strip)
-        else
-          AIA.client.chat(prompt) # Fall back to regular chat
-        end
-      else
-        AIA.client.chat(prompt)
-      end
+      AIA.client.chat(conversation)
     end
 
 
     def maybe_change_model
-
-      client_model = AIA.client.model # AiClient instance
-      else
-        client_model = AIA.client.model.id # RubyLLM::Model instance
-      end
+      client_model = AIA.client.model.id # RubyLLM::Model instance
 
-      # when adapter is ruby_llm must use model.id as the name
       unless AIA.config.model.downcase.include?(client_model.downcase)
-
-        AIA.client = AIClientAdapter.new
+        AIA.client = AIA.client.class.new
       end
     end
 
@@ -107,7 +83,6 @@ module AIA
     end
 
 
-
     def process_next_prompts(response, prompt_handler)
       if @directive_processor.directive?(response)
         directive_result = @directive_processor.process(response, @history_manager.history)
@@ -117,16 +92,24 @@ module AIA
     end
 
 
-    def determine_operation_type
-
-      if
+    def determine_operation_type
+      mode = AIA.config.client.model.modalities
+      if mode.supports?(:text_to_image)
         :text_to_image
-      elsif
+      elsif mode.supports?(:image_to_text)
         :image_to_text
-      elsif
+      elsif mode.supports?(:audio_to_text)
         :audio_to_text
-      elsif
+      elsif mode.supports?(:text_to_audio)
         :text_to_audio
+      elsif mode.supports?(:audio_to_audio)
        :audio_to_audio
+      elsif mode.supports?(:image_to_image)
        :image_to_image
+      elsif mode.supports?(:audio_to_image)
        :audio_to_image
+      elsif mode.supports?(:image_to_audio)
        :image_to_audio
       else
         :text_to_text
       end
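A hedged sketch of how the rewritten cascade resolves. The model here is hypothetical, with modalities reporting input ["text"] and output ["text", "image"]; the supports? predicate itself is added in lib/extensions/ruby_llm/modalities.rb later in this diff:

    mode = AIA.config.client.model.modalities
    mode.supports?(:text_to_image)  #=> true: determine_operation_type returns :text_to_image
    mode.supports?(:audio_to_text)  #=> false: that elsif branch is skipped
    # A plain text-only model fails every branch and falls through to :text_to_text.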
data/lib/aia/config.rb
CHANGED
@@ -17,17 +17,22 @@ require 'fileutils'
 module AIA
   class Config
     DEFAULT_CONFIG = OpenStruct.new({
+      adapter: 'ruby_llm', # 'ruby_llm' or ???
+      #
       aia_dir: File.join(ENV['HOME'], '.aia'),
       config_file: File.join(ENV['HOME'], '.aia', 'config.yml'),
       out_file: 'temp.md',
       log_file: File.join(ENV['HOME'], '.prompts', '_prompts.log'),
-
+      context_files: [],
       #
+      prompts_dir: File.join(ENV['HOME'], '.prompts'),
       prompt_extname: PromptManager::Storage::FileSystemAdapter::PROMPT_EXTENSION,
       #
       roles_prefix: 'roles',
       roles_dir: File.join(ENV['HOME'], '.prompts', 'roles'),
       role: '',
+
+      #
       system_prompt: '',
 
       # Tools
@@ -61,23 +66,26 @@ module AIA
       top_p: 1.0,
       frequency_penalty: 0.0,
       presence_penalty: 0.0,
+
+      # Audio Parameters
+      voice: 'alloy',
+      speak_command: 'afplay', # 'afplay' for audio files on MacOS
+
+      # Image Parameters
       image_size: '1024x1024',
       image_quality: 'standard',
       image_style: 'vivid',
 
+      # Models
       model: 'gpt-4o-mini',
       speech_model: 'tts-1',
       transcription_model: 'whisper-1',
       embedding_model: 'text-embedding-ada-002',
       image_model: 'dall-e-3',
-      refresh: 0, # days between refreshes of model info; 0 means every startup
-      last_refresh: Date.today - 1,
-
-      voice: 'alloy',
-      adapter: 'ruby_llm', # 'ruby_llm' or ???
 
-      #
-
+      # Model Registry
+      refresh: 7, # days between refreshes of model info; 0 means every startup
+      last_refresh: Date.today - 1,
 
       # Ruby libraries to require for Ruby binding
       require_libs: [],
@@ -450,6 +458,7 @@ module AIA
       config.system_prompt = prompt_id
     end
 
+    ###################################################
     # AI model parameters
     opts.on("-t", "--temperature TEMP", Float, "Temperature for text generation") do |temp|
       config.temperature = temp
@@ -629,6 +638,9 @@ module AIA
 
     config_hash = config.to_h
 
+    # Remove prompt_id to prevent automatic initial prompting in --chat mode
+    config_hash.delete(:prompt_id)
+
     # Remove dump_file key to prevent automatic exit on next load
     config_hash.delete(:dump_file)
 
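Since DEFAULT_CONFIG is an OpenStruct, the regrouped defaults read as plain attributes. A quick sketch of the values this diff adds or changes:

    defaults = AIA::Config::DEFAULT_CONFIG
    defaults.adapter        #=> "ruby_llm"
    defaults.context_files  #=> []
    defaults.refresh        #=> 7 (was 0, i.e. weekly registry refresh instead of every startup)
    defaults.speak_command  #=> "afplay" (now grouped under Audio Parameters)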
data/lib/aia/context_manager.rb
CHANGED
@@ -27,7 +27,12 @@ module AIA
     # @return [Array<Hash>] The conversation context array.
     def get_context(system_prompt: nil)
       # Ensure system prompt is present if provided and not already the first message
-      if
+      if system_prompt &&
+         !system_prompt.strip.empty? &&
+         (
+           @context.empty? ||
+           @context.first[:role] != 'system'
+         )
         add_system_prompt(system_prompt)
       end
       @context
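A usage sketch of the expanded guard, with hypothetical content (how ContextManager is constructed is assumed, not shown in this diff):

    cm = AIA::ContextManager.new
    cm.add_to_context(role: 'user', content: 'Hello')

    # Non-blank system prompt and no leading system message: it is added first.
    cm.get_context(system_prompt: 'You are terse.')

    # A nil or blank system prompt now fails the compound condition
    # and leaves the context untouched.
    cm.get_context(system_prompt: '  ')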
data/lib/aia/directive_processor.rb
CHANGED
@@ -4,6 +4,8 @@ require 'faraday'
 
 module AIA
   class DirectiveProcessor
+    using Refinements
+
     PUREMD_API_KEY = ENV.fetch('PUREMD_API_KEY', nil)
     EXCLUDED_METHODS = %w[ run initialize private? ]
     @descriptions = {}
@@ -117,7 +119,6 @@ module AIA
       !respond_to?(method_name) && respond_to?(method_name, true)
     end
 
-
     ################
     ## Directives ##
     ################
@@ -295,6 +296,45 @@ module AIA
       ""
     end
 
+    desc "All Available models or query on [partial LLM or provider name] Examples: //llms ; //llms openai ; //llms claude"
+    def available_models(args=nil, context_manager=nil)
+      query  = args
+      header = "Available LLMs"
+
+      if query
+        header += " for #{query.join(' and ')}"
+      end
+
+      puts header + ':'
+
+      q1 = query.select{|q| !q.start_with?(':')}
+      q2 = query.select{|q| q.start_with?(':')}
+
+      RubyLLM.models.all.each do |llm|
+        inputs  = llm.modalities.input.join(',')
+        outputs = llm.modalities.output.join(',')
+        entry   = "- #{llm.id} (#{llm.provider}) #{inputs} to #{outputs}"
+
+        if query.nil? || query.empty?
+          puts entry
+          next
+        end
+
+        show_it = true
+        q1.each{|q| show_it &&= entry.include?(q)}
+        q2.each{|q| show_it &&= llm.modalities.supports?(q)}
+
+        puts entry if show_it
+      end
+
+      ""
+    end
+    alias_method :am,         :available_models
+    alias_method :available,  :available_models
+    alias_method :models,     :available_models
+    alias_method :all_models, :available_models
+    alias_method :llms,       :available_models
+
     desc "Generates this help content"
     def help(args=nil, context_manager=nil)
       puts
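The query handling above sorts terms into two buckets, sketched here with hypothetical arguments:

    query = ['openai', ':text_to_image']
    q1 = query.select { |q| !q.start_with?(':') }  #=> ["openai"], substring-matched against each entry
    q2 = query.select { |q| q.start_with?(':') }   #=> [":text_to_image"], passed to llm.modalities.supports?
    # An entry prints only when every q1 term appears in its text and
    # every q2 term passes the modalities test.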
data/lib/aia/ruby_llm_adapter.rb
CHANGED
@@ -1,25 +1,5 @@
 # lib/aia/ruby_llm_adapter.rb
 
-require 'ruby_llm'
-
-class RubyLLM::Modalities
-  def supports?(query_mode)
-    parts = query_mode
-              .to_s
-              .downcase
-              .split(/2|-to-| to |_to_/)
-              .map(&:strip)
-
-    if 2 == parts.size
-      input.include?(parts[0]) && output.include?(parts[1])
-    elsif 1 == parts.size
-      input.include?(parts[0]) || output.include?(parts[0])
-    else
-      false
-    end
-  end
-end
-
 module AIA
   class RubyLLMAdapter
     attr_reader :tools
@@ -135,7 +115,7 @@ module AIA
     end
 
     def transcribe(audio_file)
-      @chat.ask("Transcribe this audio", with:
+      @chat.ask("Transcribe this audio", with: audio_file)
     end
 
     def speak(text)
@@ -243,21 +223,50 @@ module AIA
       end
     end
 
+
+    #########################################
+    ## text
+
     def text_to_text(prompt)
       text_prompt = extract_text_prompt(prompt)
-
+      response = if AIA.config.context_files.empty?
+                   @chat.ask(text_prompt)
+                 else
+                   @chat.ask(text_prompt, with: AIA.config.context_files)
+                 end
+
+      response.content
+    rescue => e
+      e.message
+    end
+
+
+    #########################################
+    ## Image
+
+    def extract_image_path(prompt)
+      if prompt.is_a?(String)
+        match = prompt.match(/\b[\w\/\.\-_]+?\.(jpg|jpeg|png|gif|webp)\b/i)
+        match ? match[0] : nil
+      elsif prompt.is_a?(Hash)
+        prompt[:image] || prompt[:image_path]
+      else
+        nil
+      end
     end
 
     def text_to_image(prompt)
       text_prompt = extract_text_prompt(prompt)
-
+      image_name  = extract_image_path(text_prompt)
 
       begin
-        RubyLLM.paint(text_prompt,
-
-
-
+        image = RubyLLM.paint(text_prompt, size: AIA.config.image_size)
+        if image_name
+          image_path = image.save(image_name)
+          "Image generated and saved to: #{image_path}"
+        else
+          "Image generated and available at: #{image.url}"
+        end
       rescue => e
         "Error generating image: #{e.message}"
       end
@@ -269,7 +278,7 @@ module AIA
 
       if image_path && File.exist?(image_path)
         begin
-          @chat.ask(text_prompt, with:
+          @chat.ask(text_prompt, with: image_path).content
         rescue => e
           "Error analyzing image: #{e.message}"
         end
@@ -278,13 +287,21 @@ module AIA
       end
     end
 
+
+    #########################################
+    ## audio
+
+    def audio_file?(filepath)
+      filepath.to_s.downcase.end_with?('.mp3', '.wav', '.m4a', '.flac')
+    end
+
     def text_to_audio(prompt)
       text_prompt = extract_text_prompt(prompt)
       output_file = "#{Time.now.to_i}.mp3"
 
       begin
         # Note: RubyLLM doesn't have a direct TTS feature
-        # This is a placeholder for a custom implementation
+        # TODO: This is a placeholder for a custom implementation
         File.write(output_file, text_prompt)
         system("#{AIA.config.speak_command} #{output_file}") if File.exist?(output_file) && system("which #{AIA.config.speak_command} > /dev/null 2>&1")
         "Audio generated and saved to: #{output_file}"
@@ -293,16 +310,23 @@ module AIA
       end
     end
 
-    # TODO: what if its a multi-mode model and a text prompt is provided with
-    # the audio file?
     def audio_to_text(prompt)
-
-
+      text_prompt = extract_text_prompt(prompt)
+      text_prompt = 'Transcribe this audio' if text_prompt.nil? || text_prompt.empty?
+
+      # TODO: I don't think that "prompt" would ever be an audio filepath.
+      # Check prompt to see if it is a PromptManager object that has context_files
 
-      if
-
+      if prompt.is_a?(String) &&
+         File.exist?(prompt) &&
+         audio_file?(prompt)
        begin
-
+          response = if AIA.config.context_files.empty?
+                       @chat.ask(text_prompt)
+                     else
+                       @chat.ask(text_prompt, with: AIA.config.context_files)
+                     end
+          response.content
        rescue => e
          "Error transcribing audio: #{e.message}"
        end
@@ -311,15 +335,7 @@ module AIA
        text_to_text(prompt)
      end
    end
-
-    def extract_image_path(prompt)
-      if prompt.is_a?(String)
-        prompt.scan(/\b[\w\/\.\-]+\.(jpg|jpeg|png|gif|webp)\b/i).first&.first
-      elsif prompt.is_a?(Hash)
-        prompt[:image] || prompt[:image_path]
-      else
-        nil
-      end
-    end
   end
 end
+
+__END__
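One detail of the text-to-image fix worth spelling out: the prompt text is scanned for an image filename, which becomes the save target. A sketch with a hypothetical prompt (extract_image_path is the adapter method shown above):

    prompt = 'a watercolor lighthouse at dusk, save it as lighthouse.png'
    extract_image_path(prompt)  #=> "lighthouse.png"
    # With a filename: image.save("lighthouse.png") and a "saved to" message.
    # Without one: the method reports image.url instead.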
data/lib/aia/session.rb
CHANGED
@@ -123,16 +123,13 @@ module AIA
       prompt_text = "#{prompt_text}\n\nContext:\n#{context}"
     end
 
-    # Determine operation type
-    operation_type = @chat_processor.determine_operation_type(AIA.config.model)
-
     # Add initial user prompt to context *before* sending to AI
     @context_manager.add_to_context(role: 'user', content: prompt_text)
 
     # Process the initial prompt
     @ui_presenter.display_thinking_animation
     # Send the current context (which includes the user prompt)
-    response = @chat_processor.process_prompt(@context_manager.get_context
+    response = @chat_processor.process_prompt(@context_manager.get_context)
 
     # Add AI response to context
     @context_manager.add_to_context(role: 'assistant', content: response)
@@ -202,9 +199,8 @@ module AIA
     @context_manager.add_to_context(role: 'user', content: context)
 
     # Process the context
-    operation_type = @chat_processor.determine_operation_type(AIA.config.model)
     @ui_presenter.display_thinking_animation
-    response = @chat_processor.process_prompt(@context_manager.get_context
+    response = @chat_processor.process_prompt(@context_manager.get_context)
 
     # Add AI response to context
     @context_manager.add_to_context(role: 'assistant', content: response)
@@ -228,9 +224,8 @@ module AIA
 
     @context_manager.add_to_context(role: 'user', content: processed_input)
 
-    operation_type = @chat_processor.determine_operation_type(AIA.config.model)
     @ui_presenter.display_thinking_animation
-    response = @chat_processor.process_prompt(@context_manager.get_context
+    response = @chat_processor.process_prompt(@context_manager.get_context)
 
     @context_manager.add_to_context(role: 'assistant', content: response)
     @chat_processor.output_response(response)
@@ -292,9 +287,8 @@ module AIA
     @context_manager.add_to_context(role: 'user', content: processed_prompt)
     conversation = @context_manager.get_context
 
-    operation_type = @chat_processor.determine_operation_type(AIA.config.model)
     @ui_presenter.display_thinking_animation
-    response = @chat_processor.process_prompt(conversation
+    response = @chat_processor.process_prompt(conversation)
 
     @ui_presenter.display_ai_response(response)
     @context_manager.add_to_context(role: 'assistant', content: response)
data/lib/aia.rb
CHANGED
@@ -12,7 +12,14 @@ include DebugMe
 $DEBUG_ME = false
 DebugMeDefaultOptions[:skip1] = true
 
-require_relative 'extensions/openstruct_merge'
+require_relative 'extensions/openstruct_merge'    # adds self.merge self.get_value
+require_relative 'extensions/ruby_llm/modalities' # adds model.modalities.supports? :text-to-text etc.
+
+require_relative 'refinements/string.rb'          # adds #include_any? #include_all?
+
+
+
+
 require_relative 'aia/utility'
 require_relative 'aia/version'
 require_relative 'aia/config'
data/lib/extensions/ruby_llm/modalities.rb
ADDED
@@ -0,0 +1,26 @@
+# lib/extensions/ruby_llm/modalities.rb
+# A model's "modes" are often expressed in terms like:
+#   text-to-text
+#   text_to_audio
+#   audio to image
+#   image2image
+# This new supports? method tests the model's modalities against
+# these common expressions
+
+class RubyLLM::Model::Modalities
+  def supports?(query_mode)
+    parts = query_mode
+              .to_s
+              .downcase
+              .split(/2|-to-| to |_to_/)
+              .map(&:strip)
+
+    if 2 == parts.size
+      input.include?(parts[0]) && output.include?(parts[1])
+    elsif 1 == parts.size
+      input.include?(parts[0]) || output.include?(parts[0])
+    else
+      false
+    end
+  end
+end
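A usage sketch of supports?, assuming a model whose modalities report input ["text", "image"] and output ["text"]:

    modalities.supports?(:text_to_text)    #=> true  ("text" in input AND "text" in output)
    modalities.supports?("image-to-text")  #=> true  (the regex also splits on "-to-", " to ", and "2")
    modalities.supports?("text2image")     #=> false ("image" is not in the output list)
    modalities.supports?(:image)           #=> true  (single part: matches input OR output)
    modalities.supports?(:audio)           #=> false (in neither list)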
data/lib/refinements/string.rb
ADDED
@@ -0,0 +1,16 @@
+# lib/aia_refinements/string.rb
+
+
+module Refinements
+  refine String do
+    def include_all?(substrings)
+      Array(substrings).all? { |substring| self.include?(substring) }
+    end
+    alias :all? :include_all?
+
+    def include_any?(substrings)
+      Array(substrings).any? { |substring| self.include?(substring) }
+    end
+    alias :any? :include_any?
+  end
+end
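A usage sketch of the refinement; it only takes effect in files that opt in with "using Refinements", as directive_processor.rb now does:

    using Refinements

    'gpt-4o-mini (openai)'.include_all?(%w[openai mini])  #=> true
    'gpt-4o-mini (openai)'.include_any?(%w[claude mini])  #=> true
    'gpt-4o-mini (openai)'.include_all?('openai')         #=> true, Array() wraps a bare string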
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: aia
 version: !ruby/object:Gem::Version
-  version: 0.9.3rc1
+  version: 0.9.4
 platform: ruby
 authors:
 - Dewayne VanHoozer
@@ -57,14 +57,14 @@ dependencies:
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: 1.3.0rc1
+        version: 1.3.0
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
      - !ruby/object:Gem::Version
-        version: 1.3.0rc1
+        version: 1.3.0
 - !ruby/object:Gem::Dependency
   name: reline
   requirement: !ruby/object:Gem::Requirement
@@ -311,6 +311,8 @@ files:
 - lib/aia/utility.rb
 - lib/aia/version.rb
 - lib/extensions/openstruct_merge.rb
+- lib/extensions/ruby_llm/modalities.rb
+- lib/refinements/string.rb
 - main.just
 - mcp_servers/README.md
 - mcp_servers/filesystem.json