aia 0.9.2 → 0.9.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/lib/aia/directive_processor.rb CHANGED
@@ -4,6 +4,8 @@ require 'faraday'
 
 module AIA
   class DirectiveProcessor
+    using Refinements
+
     PUREMD_API_KEY = ENV.fetch('PUREMD_API_KEY', nil)
     EXCLUDED_METHODS = %w[ run initialize private? ]
     @descriptions = {}
@@ -57,7 +59,7 @@ module AIA
                 else
                   a_string.to_s
                 end
-
+
      content.strip.start_with?(PromptManager::Prompt::DIRECTIVE_SIGNAL)
    end
 
@@ -117,7 +119,6 @@ module AIA
      !respond_to?(method_name) && respond_to?(method_name, true)
    end
 
-
    ################
    ## Directives ##
    ################
@@ -162,6 +163,7 @@ module AIA
      end
      ''
    end
+    alias_method :workflow, :pipeline
 
    desc "Inserts the contents of a file Example: //include path/to/file"
    def include(args, context_manager=nil)
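
With the alias in place, //workflow and //pipeline invoke the same directive. A hypothetical prompt snippet (the prompt IDs are illustrative):

    //pipeline step_two, step_three
    //workflow step_two, step_three   # identical behavior via alias_method
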
@@ -240,6 +242,10 @@ module AIA
 
    desc "Clears the conversation history (aka context) same as //config clear = true"
    def clear(args, context_manager=nil)
+      # TODO: review the robot's code in the Session class for when the
+      # //clear directive is used in a follow up prompt. That processing
+      # should be moved here so that it is also available in batch
+      # sessions.
      if context_manager.nil?
        return "Error: Context manager not available for //clear directive."
      end
@@ -290,6 +296,45 @@ module AIA
      ""
    end
 
+    desc "All Available models or query on [partial LLM or provider name] Examples: //llms ; //llms openai ; //llms claude"
+    def available_models( args=nil, context_manager=nil)
+      query  = args
+      header = "Available LLMs"
+
+      if query
+        header += " for #{query.join(' and ')}"
+      end
+
+      puts header + ':'
+
+      q1 = query.select{|q| !q.start_with?(':')}
+      q2 = query.select{|q| q.start_with?(':')}
+
+      RubyLLM.models.all.each do |llm|
+        inputs  = llm.modalities.input.join(',')
+        outputs = llm.modalities.output.join(',')
+        entry   = "- #{llm.id} (#{llm.provider}) #{inputs} to #{outputs}"
+
+        if query.nil? || query.empty?
+          puts entry
+          next
+        end
+
+        show_it = true
+        q1.each{|q| show_it &&= entry.include?(q)}
+        q2.each{|q| show_it &&= llm.modalities.supports?(q)}
+
+        puts entry if show_it
+      end
+
+      ""
+    end
+    alias_method :am,         :available_models
+    alias_method :available,  :available_models
+    alias_method :models,     :available_models
+    alias_method :all_models, :available_models
+    alias_method :llms,       :available_models
+
    desc "Generates this help content"
    def help(args=nil, context_manager=nil)
      puts
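
In available_models, plain query terms are AND-matched as substrings of each entry line (q1), while terms prefixed with ':' (q2) are routed to the modalities.supports? extension introduced later in this diff. A hedged sketch of usage per the desc string above (model names illustrative):

    //llms                # every model in the local registry
    //llms openai         # entries containing "openai"
    //llms gpt mini       # entries containing both "gpt" and "mini"
    //models claude       # same directive via one of the five aliases
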
data/lib/aia/ruby_llm_adapter.rb CHANGED
@@ -1,54 +1,121 @@
 # lib/aia/ruby_llm_adapter.rb
 
-require 'ruby_llm'
-
 module AIA
   class RubyLLMAdapter
    attr_reader :tools
 
    def initialize
-      @model = AIA.config.model
-      model_info = extract_model_parts(@model)
+      @provider, @model = extract_model_parts.values
+
+      configure_rubyllm
+      refresh_local_model_registry
+      setup_chat_with_tools
+    end
 
-      # Configure RubyLLM with available API keys
+    def configure_rubyllm
+      # TODO: Add some of these configuration items to AIA.config
      RubyLLM.configure do |config|
-        config.openai_api_key    = ENV.fetch('OPENAI_API_KEY', nil)
-        config.anthropic_api_key = ENV.fetch('ANTHROPIC_API_KEY', nil)
-        config.gemini_api_key    = ENV.fetch('GEMINI_API_KEY', nil)
-        config.deepseek_api_key  = ENV.fetch('DEEPSEEK_API_KEY', nil)
+        config.openai_api_key         = ENV.fetch('OPENAI_API_KEY', nil)
+        config.openai_organization_id = ENV.fetch('OPENAI_ORGANIZATION_ID', nil)
+        config.openai_project_id      = ENV.fetch('OPENAI_PROJECT_ID', nil)
+
+        config.anthropic_api_key  = ENV.fetch('ANTHROPIC_API_KEY', nil)
+        config.gemini_api_key     = ENV.fetch('GEMINI_API_KEY', nil)
+        config.deepseek_api_key   = ENV.fetch('DEEPSEEK_API_KEY', nil)
+        config.openrouter_api_key = ENV.fetch('OPENROUTER_API_KEY', nil)
+
+        config.bedrock_api_key       = ENV.fetch('BEDROCK_ACCESS_KEY_ID', nil)
+        config.bedrock_secret_key    = ENV.fetch('BEDROCK_SECRET_ACCESS_KEY', nil)
+        config.bedrock_region        = ENV.fetch('BEDROCK_REGION', nil)
+        config.bedrock_session_token = ENV.fetch('BEDROCK_SESSION_TOKEN', nil)
+
+        config.ollama_api_base = ENV.fetch('OLLAMA_API_BASE', nil)
+
+        # --- Custom OpenAI Endpoint ---
+        # Use this for Azure OpenAI, proxies, or self-hosted models via OpenAI-compatible APIs.
+        config.openai_api_base = ENV.fetch('OPENAI_API_BASE', nil) # e.g., "https://your-azure.openai.azure.com"
+
+        # --- Default Models ---
+        # Used by RubyLLM.chat, RubyLLM.embed, RubyLLM.paint if no model is specified.
+        # config.default_model           = 'gpt-4.1-nano'           # Default: 'gpt-4.1-nano'
+        # config.default_embedding_model = 'text-embedding-3-small' # Default: 'text-embedding-3-small'
+        # config.default_image_model     = 'dall-e-3'               # Default: 'dall-e-3'
+
+        # --- Connection Settings ---
+        # config.request_timeout           = 120 # Request timeout in seconds (default: 120)
+        # config.max_retries               = 3   # Max retries on transient network errors (default: 3)
+        # config.retry_interval            = 0.1 # Initial delay in seconds (default: 0.1)
+        # config.retry_backoff_factor      = 2   # Multiplier for subsequent retries (default: 2)
+        # config.retry_interval_randomness = 0.5 # Jitter factor (default: 0.5)
+
+        # --- Logging Settings ---
+        # config.log_file  = '/logs/ruby_llm.log'
+        config.log_level = :fatal # debug level can also be set to debug by setting RUBYLLM_DEBUG envar to true
+      end
+    end
+
+    def refresh_local_model_registry
+      if AIA.config.refresh.nil? ||
+          Integer(AIA.config.refresh).zero? ||
+          Date.today > (AIA.config.last_refresh + Integer(AIA.config.refresh))
+        RubyLLM.models.refresh!
+        AIA.config.last_refresh = Date.today
+        if AIA.config.config_file
+          AIA::Config.dump_config(AIA.config, AIA.config.config_file)
+        end
+      end
+    end
 
-        # Bedrock configuration
-        config.bedrock_api_key       = ENV.fetch('AWS_ACCESS_KEY_ID', nil)
-        config.bedrock_secret_key    = ENV.fetch('AWS_SECRET_ACCESS_KEY', nil)
-        config.bedrock_region        = ENV.fetch('AWS_REGION', nil)
-        config.bedrock_session_token = ENV.fetch('AWS_SESSION_TOKEN', nil)
+    def setup_chat_with_tools
+      begin
+        @chat = RubyLLM.chat(model: @model)
+      rescue => e
+        STDERR.puts "ERROR: #{e.message}"
+        exit 1
      end
 
-      @chat = RubyLLM.chat(model: model_info[:model])
+      if !AIA.config.tool_paths.empty? && !@chat.model.supports?(:function_calling)
+        STDERR.puts "ERROR: The model #{@model} does not support tools"
+        exit 1
+      end
 
      @tools = ObjectSpace.each_object(Class).select do |klass|
        klass < RubyLLM::Tool
      end
 
-      @chat.with_tools(*tools) unless tools.empty?
+      unless tools.empty?
+        @chat.with_tools(*tools)
+        AIA.config.tools = tools.map(&:name).join(', ')
+      end
    end
 
+    # TODO: Need to rethink this dispatcher pattern w/r/t RubyLLM's capabilities
+    # This code was originally designed for AiClient
+    #
    def chat(prompt)
-      if @model.downcase.include?('dall-e') || @model.downcase.include?('image-generation')
-        text_to_image(prompt)
-      elsif @model.downcase.include?('vision') || @model.downcase.include?('image')
+      modes = @chat.model.modalities
+
+      # TODO: Need to consider how to handle multi-mode models
+      if modes.supports? :text_to_text
+        text_to_text(prompt)
+
+      elsif modes.supports? :image_to_text
        image_to_text(prompt)
-      elsif @model.downcase.include?('tts') || @model.downcase.include?('speech')
+      elsif modes.supports? :text_to_image
+        text_to_image(prompt)
+
+      elsif modes.supports? :text_to_audio
        text_to_audio(prompt)
-      elsif @model.downcase.include?('whisper') || @model.downcase.include?('transcription')
+      elsif modes.supports? :audio_to_text
        audio_to_text(prompt)
+
      else
-        text_to_text(prompt)
+        # TODO: what else can be done?
      end
    end
 
    def transcribe(audio_file)
-      @chat.ask("Transcribe this audio", with: { audio: audio_file })
+      @chat.ask("Transcribe this audio", with: audio_file)
    end
 
    def speak(text)
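
The chat dispatcher above now keys off the model's declared modalities instead of substring-matching the model name. A minimal sketch of the checks involved (the model object is illustrative; supports? comes from the extension in lib/extensions/ruby_llm/modalities.rb shown later in this diff):

    modes = @chat.model.modalities
    modes.supports?(:text_to_text)    # input includes "text" AND output includes "text"
    modes.supports?('image-to-text')  # separators "2", "-to-", " to ", "_to_" all parse
    modes.supports?(:audio)           # single term: matches input OR output

refresh_local_model_registry applies a simple day-count window: with refresh = 7, the registry is refreshed once more than 7 days have passed since last_refresh; a nil or 0 setting refreshes on every run.
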
@@ -67,14 +134,6 @@ module AIA
      end
    end
 
-    def method_missing(method, *args, &block)
-      if @chat.respond_to?(method)
-        @chat.public_send(method, *args, &block)
-      else
-        super
-      end
-    end
-
    # Clear the chat context/history
    # Needed for the //clear directive
    def clear_context
@@ -88,12 +147,18 @@ module AIA
 
      # Option 2: Force RubyLLM to create a new chat instance at the global level
      # This ensures any shared state is reset
-      model_info = extract_model_parts(@model)
+      @provider, @model = extract_model_parts.values
      RubyLLM.instance_variable_set(:@chat, nil) if RubyLLM.instance_variable_defined?(:@chat)
 
      # Option 3: Create a completely fresh chat instance for this adapter
      @chat = nil # First nil it to help garbage collection
-      @chat = RubyLLM.chat(model: model_info[:model])
+
+      begin
+        @chat = RubyLLM.chat(model: @model)
+      rescue => e
+        STDERR.puts "ERROR: #{e.message}"
+        exit 1
+      end
 
      # Option 4: Call official clear_history method if it exists
      if @chat.respond_to?(:clear_history)
@@ -114,22 +179,33 @@ module AIA
      end
    end
 
+    def method_missing(method, *args, &block)
+      if @chat.respond_to?(method)
+        @chat.public_send(method, *args, &block)
+      else
+        super
+      end
+    end
+
    def respond_to_missing?(method, include_private = false)
      @chat.respond_to?(method) || super
    end
 
    private
 
-    def extract_model_parts(model_string)
-      parts = model_string.split('/')
+    def extract_model_parts
+      parts = AIA.config.model.split('/')
      parts.map!(&:strip)
 
-      if parts.length > 1
-        provider = parts[0]
-        model = parts[1]
+      if 2 == parts.length
+        provider = parts[0]
+        model    = parts[1]
+      elsif 1 == parts.length
+        provider = nil # RubyLLM will figure it out from the model name
+        model    = parts[0]
      else
-        provider = nil # RubyLLM will figure it out from the model name
-        model = parts[0]
+        STDERR.puts "ERROR: malformed model name: #{AIA.config.model}"
+        exit 1
      end
 
      { provider: provider, model: model }
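
Under the new parsing, the configured model string may carry an explicit provider prefix. The three cases, with illustrative names:

    # AIA.config.model        extract_model_parts result
    # "openai/gpt-4o"     =>  { provider: "openai", model: "gpt-4o" }
    # "gpt-4o"            =>  { provider: nil, model: "gpt-4o" }  # RubyLLM infers the provider
    # "a/b/c"             =>  prints "ERROR: malformed model name: a/b/c" and exits
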
@@ -147,33 +223,62 @@ module AIA
      end
    end
 
+
+    #########################################
+    ## text
+
    def text_to_text(prompt)
      text_prompt = extract_text_prompt(prompt)
-      @chat.ask(text_prompt)
+      response = if AIA.config.context_files.empty?
+                   @chat.ask(text_prompt)
+                 else
+                   @chat.ask(text_prompt, with: AIA.config.context_files)
+                 end
+
+      response.content
+    rescue => e
+      e.message
+    end
+
+
+    #########################################
+    ## Image
+
+    def extract_image_path(prompt)
+      if prompt.is_a?(String)
+        match = prompt.match(/\b[\w\/\.\-_]+?\.(jpg|jpeg|png|gif|webp)\b/i)
+        match ? match[0] : nil
+      elsif prompt.is_a?(Hash)
+        prompt[:image] || prompt[:image_path]
+      else
+        nil
+      end
    end
 
    def text_to_image(prompt)
      text_prompt = extract_text_prompt(prompt)
-      output_file = "#{Time.now.to_i}.png"
+      image_name  = extract_image_path(text_prompt)
 
      begin
-        RubyLLM.paint(text_prompt, output_path: output_file,
-                      size: AIA.config.image_size,
-                      quality: AIA.config.image_quality,
-                      style: AIA.config.image_style)
-        "Image generated and saved to: #{output_file}"
+        image = RubyLLM.paint(text_prompt, size: AIA.config.image_size)
+        if image_name
+          image_path = image.save(image_name)
+          "Image generated and saved to: #{image_path}"
+        else
+          "Image generated and available at: #{image.url}"
+        end
      rescue => e
        "Error generating image: #{e.message}"
      end
    end
 
    def image_to_text(prompt)
-      image_path = extract_image_path(prompt)
+      image_path  = extract_image_path(prompt)
      text_prompt = extract_text_prompt(prompt)
 
      if image_path && File.exist?(image_path)
        begin
-          @chat.ask(text_prompt, with: { image: image_path })
+          @chat.ask(text_prompt, with: image_path).content
        rescue => e
          "Error analyzing image: #{e.message}"
        end
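
The relocated extract_image_path also fixes a subtle bug: the old scan(...).first&.first returned only the captured extension (e.g. "png"), while match[0] returns the whole matched path. Illustrative behavior, assuming the method as defined above:

    extract_image_path("describe photos/cat.png please")  # => "photos/cat.png"
    extract_image_path({ image: 'cat.png' })               # => "cat.png"
    extract_image_path(42)                                 # => nil
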
@@ -182,13 +287,21 @@ module AIA
      end
    end
 
+
+    #########################################
+    ## audio
+
+    def audio_file?(filepath)
+      filepath.to_s.downcase.end_with?('.mp3', '.wav', '.m4a', '.flac')
+    end
+
    def text_to_audio(prompt)
      text_prompt = extract_text_prompt(prompt)
      output_file = "#{Time.now.to_i}.mp3"
 
      begin
        # Note: RubyLLM doesn't have a direct TTS feature
-        # This is a placeholder for a custom implementation
+        # TODO: This is a placeholder for a custom implementation
        File.write(output_file, text_prompt)
        system("#{AIA.config.speak_command} #{output_file}") if File.exist?(output_file) && system("which #{AIA.config.speak_command} > /dev/null 2>&1")
        "Audio generated and saved to: #{output_file}"
@@ -198,10 +311,22 @@ module AIA
    end
 
    def audio_to_text(prompt)
-      if prompt.is_a?(String) && File.exist?(prompt) &&
-         prompt.downcase.end_with?('.mp3', '.wav', '.m4a', '.flac')
+      text_prompt = extract_text_prompt(prompt)
+      text_prompt = 'Transcribe this audio' if text_prompt.nil? || text_prompt.empty?
+
+      # TODO: I don't think that "prompt" would ever be an audio filepath.
+      # Check prompt to see if it is a PromptManager object that has context_files
+
+      if prompt.is_a?(String) &&
+         File.exist?(prompt) &&
+         audio_file?(prompt)
        begin
-          @chat.ask("Transcribe this audio", with: { audio: prompt })
+          response = if AIA.config.context_files.empty?
+                       @chat.ask(text_prompt)
+                     else
+                       @chat.ask(text_prompt, with: AIA.config.context_files)
+                     end
+          response.content
        rescue => e
          "Error transcribing audio: #{e.message}"
        end
@@ -210,15 +335,7 @@ module AIA
        text_to_text(prompt)
      end
    end
-
-    def extract_image_path(prompt)
-      if prompt.is_a?(String)
-        prompt.scan(/\b[\w\/\.\-]+\.(jpg|jpeg|png|gif|webp)\b/i).first&.first
-      elsif prompt.is_a?(Hash)
-        prompt[:image] || prompt[:image_path]
-      else
-        nil
-      end
-    end
  end
 end
+
+__END__
data/lib/aia/session.rb CHANGED
@@ -123,16 +123,13 @@ module AIA
        prompt_text = "#{prompt_text}\n\nContext:\n#{context}"
      end
 
-      # Determine operation type
-      operation_type = @chat_processor.determine_operation_type(AIA.config.model)
-
      # Add initial user prompt to context *before* sending to AI
      @context_manager.add_to_context(role: 'user', content: prompt_text)
 
      # Process the initial prompt
      @ui_presenter.display_thinking_animation
      # Send the current context (which includes the user prompt)
-      response = @chat_processor.process_prompt(@context_manager.get_context, operation_type)
+      response = @chat_processor.process_prompt(@context_manager.get_context)
 
      # Add AI response to context
      @context_manager.add_to_context(role: 'assistant', content: response)
@@ -202,9 +199,8 @@ module AIA
      @context_manager.add_to_context(role: 'user', content: context)
 
      # Process the context
-      operation_type = @chat_processor.determine_operation_type(AIA.config.model)
      @ui_presenter.display_thinking_animation
-      response = @chat_processor.process_prompt(@context_manager.get_context, operation_type)
+      response = @chat_processor.process_prompt(@context_manager.get_context)
 
      # Add AI response to context
      @context_manager.add_to_context(role: 'assistant', content: response)
@@ -228,9 +224,8 @@ module AIA
 
      @context_manager.add_to_context(role: 'user', content: processed_input)
 
-      operation_type = @chat_processor.determine_operation_type(AIA.config.model)
      @ui_presenter.display_thinking_animation
-      response = @chat_processor.process_prompt(@context_manager.get_context, operation_type)
+      response = @chat_processor.process_prompt(@context_manager.get_context)
 
      @context_manager.add_to_context(role: 'assistant', content: response)
      @chat_processor.output_response(response)
@@ -273,7 +268,7 @@ module AIA
      begin
        AIA.config.client = AIA::RubyLLMAdapter.new
      rescue => e
-        SYSERR.puts "Error reinitializing client: #{e.message}"
+        STDERR.puts "Error reinitializing client: #{e.message}"
      end
 
      @ui_presenter.display_info("Chat context cleared.")
@@ -292,9 +287,8 @@ module AIA
      @context_manager.add_to_context(role: 'user', content: processed_prompt)
      conversation = @context_manager.get_context
 
-      operation_type = @chat_processor.determine_operation_type(AIA.config.model)
      @ui_presenter.display_thinking_animation
-      response = @chat_processor.process_prompt(conversation, operation_type)
+      response = @chat_processor.process_prompt(conversation)
 
      @ui_presenter.display_ai_response(response)
      @context_manager.add_to_context(role: 'assistant', content: response)
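
All four call sites in session.rb change the same way: determine_operation_type is gone, and the adapter's chat dispatcher (shown earlier) infers the operation from the model's modalities, so callers simply pass the context:

    # 0.9.2: response = @chat_processor.process_prompt(conversation, operation_type)
    # 0.9.4: response = @chat_processor.process_prompt(conversation)
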
data/lib/aia/utility.rb CHANGED
@@ -1,23 +1,33 @@
 # lib/aia/utility.rb
 
+require 'word_wrapper' # Pure ruby word wrapping
+
 module AIA
   class Utility
     class << self
       # Displays the AIA robot ASCII art
+      # Yes, its slightly frivolous but it does contain some
+      # useful configuration information.
      def robot
+        indent = 18
+        spaces = " "*indent
+        width  = TTY::Screen.width - indent - 2
+
        puts <<-ROBOT
 
            ,      ,
-           (\____/)   AI Assistant
+           (\____/)   AI Assistant (v#{AIA::VERSION}) is Online
            (_oo_)     #{AIA.config.model}
-             (O)      is Online
-           __||__ \)  using #{AIA.config.adapter}
-          [/______\]  /
-          / \__AI__/  \/
+             (O)      using #{AIA.config.adapter} (v#{RubyLLM::VERSION})
+           __||__ \)  model db was last refreshed on
+          [/______\]  /  #{AIA.config.last_refresh}
+          / \__AI__/  \/  #{AIA.config.tool_paths.empty? ? 'I forgot my toolbox' : 'I brought some tools'}
           /   /__\
-         (\  /____\
-
+         (\  /____\   #{AIA.config.tool_paths.empty? ? '' : 'My Toolbox contains:'}
        ROBOT
+        if AIA.config.tools
+          puts WordWrapper::MinimumRaggedness.new(width, AIA.config.tools).wrap.split("\n").map{|s| spaces+s+"\n"}.join
+        end
      end
    end
  end
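
The tool list is wrapped with the word_wrapper gem and indented beneath the robot. A minimal sketch of the wrapping call used above (width and text are illustrative):

    require 'word_wrapper'

    width   = 40  # in robot(): TTY::Screen.width - 18 - 2
    wrapped = WordWrapper::MinimumRaggedness.new(width, 'ToolOne, ToolTwo, ToolThree').wrap
    puts wrapped.split("\n").map { |line| (' ' * 18) + line + "\n" }.join
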
data/lib/aia.rb CHANGED
@@ -5,7 +5,6 @@
 # provides an interface for interacting with AI models and managing prompts.
 
 require 'ruby_llm'
-
 require 'prompt_manager'
 
 require 'debug_me'
@@ -13,7 +12,14 @@ include DebugMe
 $DEBUG_ME = false
 DebugMeDefaultOptions[:skip1] = true
 
-require_relative 'extensions/openstruct_merge'
+require_relative 'extensions/openstruct_merge'    # adds self.merge  self.get_value
+require_relative 'extensions/ruby_llm/modalities' # adds model.modalities.supports? :text-to-text etc.
+
+require_relative 'refinements/string.rb' # adds #include_any? #include_all?
+
+
+
+
 require_relative 'aia/utility'
 require_relative 'aia/version'
 require_relative 'aia/config'
@@ -80,12 +86,21 @@ module AIA
    prompt_handler = PromptHandler.new
 
    # Initialize the appropriate client adapter based on configuration
-    @config.client = if @config.adapter == 'ruby_llm'
+    @config.client = if 'ruby_llm' == @config.adapter
                       RubyLLMAdapter.new
                     else
-                       AIClientAdapter.new
+                       # TODO: ?? some other LLM API wrapper
+                       STDERR.puts "ERROR: There is no adapter for #{@config.adapter}"
+                       exit 1
                     end
 
+    # There are two kinds of sessions: batch and chat
+    # A chat session is started when the --chat CLI option is used
+    # BUT its also possible to start a chat session with an initial prompt AND
+    # within that initial prompt there can be a workflow (aka pipeline)
+    # defined. If that is the case, then the chat session will not start
+    # until the initial prompt has completed its workflow.
+
    session = Session.new(prompt_handler)
 
    session.start
data/lib/extensions/ruby_llm/modalities.rb ADDED
@@ -0,0 +1,26 @@
+# lib/extensions/ruby_llm/modalities.rb
+# A models "modes" are often expressed in terms like:
+#   text-to-text
+#   text_to_audio
+#   audio to image
+#   image2image
+# This new supports? method tests the models modalities against
+# these common expressions
+
+class RubyLLM::Model::Modalities
+  def supports?(query_mode)
+    parts = query_mode
+              .to_s
+              .downcase
+              .split(/2|-to-| to |_to_/)
+              .map(&:strip)
+
+    if 2 == parts.size
+      input.include?(parts[0]) && output.include?(parts[1])
+    elsif 1 == parts.size
+      input.include?(parts[0]) || output.include?(parts[0])
+    else
+      false
+    end
+  end
+end
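
A hedged usage sketch of the extension, using the registry enumeration that //llms relies on (the first model is arbitrary):

    llm = RubyLLM.models.all.first
    llm.modalities.supports?('text-to-text')  # input has "text" and output has "text"
    llm.modalities.supports?(:image2text)     # "2" also works as a separator
    llm.modalities.supports?(:audio)          # one part: input OR output includes "audio"
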
data/lib/refinements/string.rb ADDED
@@ -0,0 +1,16 @@
+# lib/aia_refinements/string.rb
+
+
+module Refinements
+  refine String do
+    def include_all?(substrings)
+      Array(substrings).all? { |substring| self.include?(substring) }
+    end
+    alias :all? :include_all?
+
+    def include_any?(substrings)
+      Array(substrings).any? { |substring| self.include?(substring) }
+    end
+    alias :any? :include_any?
+  end
+end
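
A short sketch of the refinement in use; DirectiveProcessor activates it with "using Refinements" (values illustrative):

    using Refinements

    entry = "- gpt-4o (openai) text,image to text"
    entry.include_all?(%w[openai text])  # => true, every substring present
    entry.include_any?(%w[claude gpt])   # => true, at least one present
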